| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
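# Usage sketch (added for illustration; not part of the original module): the
# lazy module defers heavy imports, so accessing an attribute only imports the
# submodule that defines it. Pulling in the config below should not import
# torch, TensorFlow, or Flax.
#
#     from transformers.models.distilbert import DistilBertConfig
#     config = DistilBertConfig()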
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Return the maximum of nums[left:right + 1] using divide and conquer.

    >>> find_max([3, 2, 1], 0, 2)
    3
    >>> find_max([-3, -2, -1], 0, 2)
    -1
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
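# Usage sketch (added for illustration): the recursion halves the index range
# on each call, so find_max runs in O(n) time with O(log n) recursion depth.
#
#     print(find_max([2, 7, 1, 4, 9], 0, 4))  # -> 9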
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a data2vec-vision model."""

    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12,
                 image_size=224, patch_size=16, num_channels=3, use_mask_token=False,
                 use_absolute_position_embeddings=False, use_relative_position_bias=False,
                 use_shared_relative_position_bias=False, layer_scale_init_value=0.1,
                 drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11],
                 pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4,
                 auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
                 semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
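# Usage sketch (added for illustration; mirrors the two classes above):
#
#     config = Data2VecVisionConfig(image_size=384)
#     onnx_config = Data2VecVisionOnnxConfig(config)
#     print(list(onnx_config.inputs))         # -> ['pixel_values']
#     print(onnx_config.atol_for_validation)  # -> 0.0001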
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a model, its prepared DDP copy, and a prepared dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
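# Usage sketch (added for illustration): the gather_for_metrics checks above
# only exercise multi-process behaviour when run on several processes, e.g.
# via `accelerate launch` on this script or, from Python:
#
#     from accelerate import notebook_launcher
#     notebook_launcher(main, num_processes=2)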
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3,
                 embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4],
                 window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
                 layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
                 type_sequence_label_size=10, encoder_stride=8,
                 out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : Optional[Any] = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Tuple = FocalNetModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
a : str = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Tuple = FocalNetBackbone(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
a : Optional[int] = None
a : int = FocalNetBackbone(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : str = FocalNetForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : Optional[Any] = 1
a : int = FocalNetForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : str = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : List[str] = FocalNetForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : Tuple = 1
a : List[Any] = FocalNetForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Any = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = FocalNetModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=3_7 , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@unittest.skip(reason='FocalNet does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking')
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : int = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : List[str] = model_class(UpperCAmelCase_)
a : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Any = [*signature.parameters.keys()]
a : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Any = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Tuple = outputs.hidden_states
a : int = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# FocalNet has a different seq_length
a : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
a : int = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
a , a , a , a : Optional[int] = reshaped_hidden_states[0].shape
a : Union[str, Any] = (
reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
a : List[str] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Union[str, Any] = 3
a : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
a : str = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Dict = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Any = FocalNetModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = _config_zero_init(UpperCAmelCase_)
for model_class in self.all_model_classes:
a : Dict = model_class(config=UpperCAmelCase_)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(UpperCAmelCase_)
a : int = self.default_image_processor
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
# verify the logits
a : str = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : str = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
self.assertTrue(outputs.logits.argmax(dim=-1).item() , 2_8_1)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = FocalNetModelTester(self)
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel-spectrogram images and decodes them to audio."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel,
                 scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
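# Usage sketch (added for illustration; the checkpoint id is an assumption
# based on the public audio-diffusion models, not part of this file):
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
#     image, audio = output.images[0], output.audios[0]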
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node at `position`."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node at `position`."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node at `position`."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap plus a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Insert an element with the given priority."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Remove and return the element with the smallest weight."""
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Change the priority of an element already in the queue."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move the element up the heap while it is lighter than its parent.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move the element down the heap while it is heavier than a child.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap two heap entries and keep the position map consistent.
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Add a node to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge with the given weight between two nodes."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm; return (distance, parent) maps describing an MST."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
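# Usage sketch (added for illustration): build a small triangle graph and read
# off the minimum spanning tree from the parent map.
#
#     graph = GraphUndirectedWeighted[int]()
#     graph.add_edge(1, 2, 3)
#     graph.add_edge(2, 3, 10)
#     graph.add_edge(1, 3, 1)
#     dist, parent = prims_algo(graph)
#     print(parent)  # node 3 attaches through node 1 (weight 1), not node 2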
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into its real layer name and tensorstore spec."""
    if "metadata" in layer:
        split_layer = layer.split('metadata')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/'))]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/'))]
    else:
        split_layer = layer.split('/')
        curr_real_layer_name = '/'.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Apply key renaming to a block of weights and save it to disk."""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Optional[int] , snake_case : Tuple = WEIGHTS_NAME ) -> Union[str, Any]:
"""simple docstring"""
a : Tuple = convert_file_size_to_int(snake_case )
a : Optional[int] = []
a : int = {}
a : Dict = 0
a : Any = 0
os.makedirs(snake_case , exist_ok=snake_case )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
a : Union[str, Any] = serialization.msgpack_restore(fp.read() )['optimizer']['target']
a : str = flatten_dict(snake_case , sep='/' )
a : List[str] = {}
for layer in checkpoint_info.keys():
a , a , a : Optional[int] = get_key_and_tensorstore_dict(
snake_case , snake_case , snake_case )
if curr_real_layer_name in all_layers:
a : Union[str, Any] = content
else:
a : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a : Union[str, Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a : List[str] = torch.tensor(snake_case )
a : Any = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a : Optional[int] = rename_base_flax_keys(tuple(key.split('/' ) ) , snake_case )
a : str = '/'.join(snake_case )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a : Optional[int] = os.path.join(
snake_case , weights_name.replace('.bin' , F"""-{len(snake_case )+1:05d}-of-???.bin""" ) )
rename_and_save_block(snake_case , snake_case )
sharded_state_dicts.append(current_block.keys() )
del current_block
a : List[str] = {}
a : str = 0
a : List[str] = raw_weights.to(getattr(snake_case , snake_case ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a : List[str] = os.path.join(snake_case , weights_name.replace('.bin' , F"""-{len(snake_case )+1:05d}-of-???.bin""" ) )
rename_and_save_block(snake_case , snake_case )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
        if len(sharded_state_dicts ) == 1:
            return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
        weight_map = {}
        shards = {}
        for idx, shard in enumerate(sharded_state_dicts ):
            shard_file = weights_name.replace(
                '.bin' , F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
            temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
            os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
            shards[shard_file] = shard
            for key in shard:
                weight_map[key] = shard_file
        # Add the metadata
        metadata = {'total_size': total_size}
        index = {'metadata': metadata, 'weight_map': weight_map}
        with open(os.path.join(dump_path , weights_name + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
            f.write(content )
return metadata, index
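# A toy sketch of the shard-size bookkeeping used by shard_on_the_fly above,
# with made-up float32 tensors (4 bytes per element). Only the splitting logic
# is illustrated, not the tensorstore reading or file renaming.
def _demo_shard_split(max_shard_size_bytes=40):
    import torch

    fake_weights = {f'layer_{i}.weight': torch.zeros(4) for i in range(5)}  # 16 bytes each
    shards, current_block, current_block_size = [], {}, 0
    for name, tensor in fake_weights.items():
        weight_size = tensor.numel() * tensor.element_size()
        if current_block_size + weight_size > max_shard_size_bytes and current_block:
            shards.append(current_block)
            current_block, current_block_size = {}, 0
        current_block[name] = tensor
        current_block_size += weight_size
    shards.append(current_block)
    return shards  # three shards holding 2, 2 and 1 tensors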
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCamelCase : Union[str, Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check() -> None:
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
    tokenizer = T5Tokenizer.from_pretrained('t5-small' )
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text , return_tensors='pt' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 355 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( PipelineTool ):
"""simple docstring"""
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__( self , *args , **kwargs):
        """simple docstring"""
        requires_backends(self , ['vision'])
        super().__init__(*args , **kwargs)
    def encode( self , image : "Image" , label : str):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt')
    def forward( self , inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode( self , outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
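# A hedged usage sketch for the tool above (the class is anonymized as
# `UpperCamelCase` in this file). Instantiating and calling it downloads the
# CIDAS/clipseg-rd64-refined checkpoint, so network access is required.
def _demo_image_segmenter():
    from PIL import Image

    tool = UpperCamelCase()
    image = Image.new('RGB', (64, 64), color='white')  # toy input image
    mask = tool(image=image, label='a white square')  # PipelineTool.__call__ runs encode/forward/decode
    return mask  # a PIL image holding the 0/255 segmentation mask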
| 345 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCamelCase : List[str] = pytest.mark.integration
@require_faiss
class UpperCamelCase ( TestCase ):
"""simple docstring"""
    def _create_dummy_dataset( self ) -> Dataset:
        """simple docstring"""
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30 ).tolist()]})
        return dset
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
import faiss
        dset : Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.float32)} , with_indices=True , keep_in_memory=True)
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
        dset.drop_index('vecs')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
import faiss
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30 ).reshape(-1 , 1) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
import faiss
        dset : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name)
            dset.load_faiss_index('vecs2' , tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30 ).reshape(-1 , 1) , index_name='vecs')
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32)))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
from elasticsearch import Elasticsearch
        dset : Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29')
            self.assertEqual(examples['filename'][0] , 'my_name-train_29')
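# A compact, standalone sketch of the FAISS workflow the tests above exercise:
# index an embeddings column, then look up the nearest example for a query
# vector. The toy rows here are made up for illustration.
def _demo_faiss_lookup():
    ds = Dataset.from_dict({'text': ['a', 'b', 'c'], 'vecs': np.eye(3, dtype=np.float32).tolist()})
    ds.add_faiss_index(column='vecs')
    scores, examples = ds.get_nearest_examples('vecs', np.array([0.0, 1.0, 0.0], dtype=np.float32), k=1)
    return examples['text']  # ['b']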
@require_faiss
class UpperCamelCase ( TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal , 5)
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal , 10)
        # single query
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1))
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
        # batched queries
        queries = np.eye(5 , dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError , index.search_batch , queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores) , 0)
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
import faiss
        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs(mockfs ) -> None:
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = F"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class UpperCamelCase ( TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        """simple docstring"""
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
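# A tiny, dependency-free illustration of how merges like the ones written in
# setUp drive BPE: the highest-priority adjacent pair is merged repeatedly
# ("a p" -> "ap", "ap t</w>" -> "apt</w>", "ad apt</w>" -> "adapt</w>").
def _demo_merge_order():
    merges = [('a', 'p'), ('ap', 't</w>'), ('r', 'e'), ('a', 'd'), ('ad', 'apt</w>')]
    ranks = {pair: i for i, pair in enumerate(merges)}
    word = ['a', 'd', 'a', 'p', 't</w>']
    while len(word) > 1:
        pairs = [(ranks.get(pair, float('inf')), i) for i, pair in enumerate(zip(word, word[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float('inf'):
            break
        word = word[:i] + [word[i] + word[i + 1]] + word[i + 2:]
    return word  # ['adapt</w>']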
| 345 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCamelCase ( AbstractDatasetReader ):
"""simple docstring"""
    def __init__( self , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split)
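# A hedged usage sketch: turning a Spark DataFrame into a Hugging Face Dataset
# with the reader above (anonymized as `UpperCamelCase` in this file). Requires
# a local SparkSession; the toy rows and cache path are made up.
def _demo_spark_read():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master('local[1]').getOrCreate()
    df = spark.createDataFrame([{'text': 'hello'}, {'text': 'world'}])
    ds = UpperCamelCase(df, cache_dir='/tmp/spark_cache').read()
    return ds['text']  # ['hello', 'world'] (order may vary across partitions)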
| 357 | '''simple docstring'''
def different_signs(num1 : int , num2 : int ) -> bool:
    """simple docstring"""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
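# Worked examples: in two's complement, num1 ^ num2 has its sign bit set
# exactly when one operand is negative and the other is not.
def _demo_different_signs():
    assert different_signs(1, -1) is True
    assert different_signs(-7, -3) is False
    assert different_signs(0, 5) is False  # zero counts as non-negative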
| 345 | 0 |
'''simple docstring'''
def valid_coloring(neighbours , colored_vertices , color ):
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph , max_colors , colored_vertices , index ):
    """simple docstring"""
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph , max_colors ):
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
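# A worked example on a 4-cycle given as an adjacency matrix: two colors
# suffice, and the backtracking search above finds the alternating assignment.
def _demo_color_square():
    square = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    return color(square, 2)  # [0, 1, 0, 1]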
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
"""simple docstring"""
A : Dict = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
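# A hedged usage sketch for the processor above (anonymized as
# `UpperCamelCase`), assuming the canonical BaseImageProcessor entry point
# (__call__ dispatching to preprocess): a toy PIL image becomes a
# (1, 3, 224, 224) pixel_values batch after resize, center crop, rescale and
# normalization.
def _demo_preprocess():
    from PIL import Image

    processor = UpperCamelCase()
    image = Image.new('RGB', (300, 200), color='blue')  # dummy input
    batch = processor(images=image, return_tensors='np')
    return batch['pixel_values'].shape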
| 345 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : str = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
UpperCamelCase : Tuple = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
UpperCamelCase : Optional[int] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 359 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of logarithmic function
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 0 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None):
        """simple docstring"""
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ):
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input( self , sample : torch.FloatTensor , *args , **kwargs):
        """simple docstring"""
        return sample
    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets):
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__( self ):
        """simple docstring"""
        return self.config.num_train_timesteps
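# A minimal denoising-loop sketch for the scheduler above (anonymized as
# `UpperCamelCase`); the zero "model output" stands in for a real denoiser so
# the loop runs without any weights.
def _demo_scheduler_loop():
    scheduler = UpperCamelCase(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a denoising model
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample.shape  # torch.Size([1, 3, 8, 8])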
| 360 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
        (total_loss,) = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device) # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids)
| 345 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream ) -> Any:
    """simple docstring"""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images(f ) -> int:
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data
@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot(labels_dense , num_classes ) -> List[str]:
    """simple docstring"""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels(f , one_hot=False , num_classes=10 ) -> Tuple:
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
"""simple docstring"""
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
"""simple docstring"""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images , 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless it already exists."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Read the train/validation/test MNIST splits into a `_Datasets` tuple."""
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
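# Minimal usage sketch (added for illustration; not part of the original module). It assumes
# network access to the CVDF mirror and write access to /tmp/mnist_data.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    batch_xs, batch_ys = mnist.train.next_batch(64)
    print(batch_xs.shape, batch_ys.shape)  # expected: (64, 784) (64, 10)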
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
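# Note (added for illustration): swapping the module object in sys.modules for a _LazyModule
# defers the heavy submodule imports until an attribute is first accessed, e.g.
#   from transformers import LayoutXLMProcessor  # only then is processing_layoutxlm imported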
| 345 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
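# Illustrative note (added): schema inference in `Dataset.from_list` is column-wise. The empty
# list in the first record leaves `col_1` untyped, so the second record pins down the type:
#   Dataset.from_list([{"col_1": []}, {"col_1": [1, 2]}]).features["col_1"]
#   # -> Sequence(Value("int64"))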
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of {headline: value} COVID-19 statistics scraped from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''') | 363 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
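    # Worked note (added for illustration): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2),
    # inputs_to_logits_ratio == 5 * 2**6 == 320, i.e. the feature encoder emits one frame per
    # 320 waveform samples (20 ms of audio at a 16 kHz sampling rate).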
| 345 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 364 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating transformers.BertConfig with additional parameters for pruning."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 345 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
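    # Note (added for illustration): the x4 upscaler should turn the 128x128 dummy image above
    # into a 512x512 output; the shape assertions in the tests below check exactly that.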
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 365 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
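# Usage sketch (added for illustration; the checkpoint name and placeholder token are assumptions):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>")
#   # "<cat-toy>" is expanded to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3" before encoding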
| 345 | 0 |
'''simple docstring'''
UpperCamelCase : Optional[int] = "Input must be a string of 8 numbers plus letter"
UpperCamelCase : List[Any] = "TRWAGMYFPDXBNJZSQVHLCKE"
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> bool:
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = F"""Expected string as input, found {type(lowerCAmelCase__ ).__name__}"""
raise TypeError(lowerCAmelCase__ )
a : List[Any] = spanish_id.replace('-' , '' ).upper()
if len(lowerCAmelCase__ ) != 9:
raise ValueError(lowerCAmelCase__ )
try:
a : Optional[int] = int(spanish_id_clean[0:8] )
a : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCAmelCase__ ) from ex
if letter.isdigit():
raise ValueError(lowerCAmelCase__ )
return letter == LOOKUP_LETTERS[number % 23]
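# Worked example (added for illustration): for "12345678Z", number = 12345678 and
# 12345678 % 23 == 14, so the expected check letter is LOOKUP_LETTERS[14] == "Z";
# hence is_spain_national_id("12345678Z") returns True.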
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
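# Parsing example (added for illustration): "swin_base_patch4_window7_224" splits into
# ["swin", "base", "patch4", "window7", "224"], giving model_size="base", img_size=224
# and window_size=7.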
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
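# Illustrative note (added): timm stores attention as a single fused tensor, e.g. a qkv.weight of
# shape (3 * dim, dim); the split above routes rows [0, dim) to query, [dim, 2 * dim) to key and
# [2 * dim, 3 * dim) to value of the corresponding HF attention module.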
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 0 |
'''simple docstring'''
UpperCamelCase : Any = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase : Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase : Optional[int] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 367 | '''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    This object contains a DeepSpeed configuration dictionary and can be quickly queried for
    things like zero stage.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Returns the value for the dotted key, or `default` if no value is set."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
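    # Example (added for illustration): dotted keys walk the nested config dict, so with
    # config = {"zero_optimization": {"stage": 3}}:
    #   self.get_value("zero_optimization.stage")    # -> 3
    #   self.get_value("zero_optimization.misc", 7)  # -> 7 (missing keys fall back to the default)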
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Deletes a sub-section of the config if it exists."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """
    Internal wrapper around a deepspeed engine to follow the conventional training loop.
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """
    Internal wrapper around a deepspeed optimizer.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """
    Internal wrapper around a deepspeed scheduler.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """
    Dummy optimizer that presents model parameters; mainly used to follow the conventional
    training loop when the optimizer config is specified in the deepspeed config file.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """
    Dummy scheduler; mainly used to follow the conventional training loop when the scheduler
    config is specified in the deepspeed config file.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 345 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
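# Illustrative note (added): a disjunctive constraint is fulfilled as soon as ANY of its candidate
# sequences has been generated. With [[1, 2, 3], [1, 2, 4]] the prefix [1, 2] keeps both branches
# alive, and generating either 3 or 4 afterwards completes the constraint.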
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
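# Usage sketch (added for illustration; the task name and data path are assumptions):
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(args, tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset[0])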
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class UpperCamelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
def get_pretrained_model_and_inputs(self):
"""simple docstring"""
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert')
batch_size = 1_3
pixel_values = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def get_vision_text_model(self , vision_config , text_config):
"""simple docstring"""
vision_model = TFViTModel(vision_config , name='vision_model')
text_model = TFBertModel(text_config , name='text_model')
return vision_model, text_model
def prepare_config_and_inputs(self):
"""simple docstring"""
vit_model_tester = TFViTModelTester(self)
bert_model_tester = TFBertModelTester(self)
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config, pixel_values, _ = vision_config_and_inputs
(
text_config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCamelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
def get_pretrained_model_and_inputs(self):
"""simple docstring"""
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta')
batch_size = 1_3
pixel_values = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def check_vision_text_output_attention(self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
"""simple docstring"""
vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model)
output = model(
input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
vision_attentions = output.vision_model_output.attentions
self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
image_size = to_atuple(vision_model.config.image_size)
patch_size = to_atuple(vision_model.config.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
text_attentions = output.text_model_output.attentions
self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def get_vision_text_model(self , vision_config , text_config):
"""simple docstring"""
vision_model = TFDeiTModel(vision_config , name='vision_model')
text_model = TFRobertaModel(text_config , name='text_model')
return vision_model, text_model
def prepare_config_and_inputs(self):
"""simple docstring"""
vit_model_tester = TFDeiTModelTester(self)
bert_model_tester = TFRobertaModelTester(self)
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config, pixel_values, _ = vision_config_and_inputs
(
text_config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCamelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
def get_pretrained_model_and_inputs(self):
"""simple docstring"""
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert')
batch_size = 1_3
pixel_values = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def get_vision_text_model(self , vision_config , text_config):
"""simple docstring"""
vision_model = TFCLIPVisionModel(vision_config , name='vision_model')
text_model = TFBertModel(text_config , name='text_model')
return vision_model, text_model
def prepare_config_and_inputs(self):
"""simple docstring"""
clip_model_tester = TFCLIPVisionModelTester(self)
bert_model_tester = TFBertModelTester(self)
vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = vision_config_and_inputs
(
text_config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference(self):
"""simple docstring"""
model = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=True)
processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np')
outputs = model(**inputs)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3))
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
"""simple docstring"""
model_input_names = ["pixel_values"]
def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 2_5_6}
size = get_size_dict(size , default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
crop_size = get_size_dict(crop_size , param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
"""simple docstring"""
size = get_size_dict(size , default_to_square=False)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False)
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
"""simple docstring"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs)
def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs):
"""simple docstring"""
return rescale(image , scale=scale , data_format=data_format , **kwargs)
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
"""simple docstring"""
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
"""simple docstring"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name='crop_size')
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
images = [to_channel_dimension_format(image , data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data , tensor_type=return_tensors)
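# Minimal usage sketch for the preprocess pipeline above (the concrete processor class
# name and input image are hypothetical): batch = processor(images=[pil_image],
# return_tensors='pt') resizes the shortest edge to 256, center-crops to 224x224,
# rescales by 1/255 and normalizes, returning batch['pixel_values'] of shape (1, 3, 224, 224).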
def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None):
"""simple docstring"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 345 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["""text""", """image""", """audio"""]
def create_inputs( input_types : List ) -> List:
"""simple docstring"""
inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(input_type , list ):
inputs.append(create_inputs(input_type ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def output_types( outputs : List ) -> List[str]:
"""simple docstring"""
output_types = []
for output in outputs:
if isinstance(output , (str, AgentText) ):
output_types.append('text' )
elif isinstance(output , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(output , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
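# Illustrative example (assumed inputs): output_types(['hello', Image.new('RGB', (4, 4))])
# returns ['text', 'image'], and create_inputs(['text', 'audio']) returns a list holding
# the string 'Text input' and a torch.ones(3_000) tensor.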
@is_tool_test
class UpperCamelCase :
"""simple docstring"""
def test_inputs_outputs(self):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
inputs = self.tool.inputs
for _input in inputs:
if isinstance(_input , list):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def test_call(self):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
# There is a single output
if len(self.tool.outputs) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs) , self.tool.outputs)
def test_common_attributes(self):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
def test_agent_types_outputs(self):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs)
outputs = self.tool(*inputs)
if not isinstance(outputs , list):
outputs = [outputs]
self.assertEqual(len(outputs) , len(self.tool.outputs))
for output, output_type in zip(outputs , self.tool.outputs):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output , agent_type))
def test_agent_type_checking(self):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs)
_inputs = []
for _input, input_type in zip(inputs , self.tool.inputs):
if isinstance(input_type , list):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
outputs = self.tool(*_inputs)
if not isinstance(outputs , list):
outputs = [outputs]
self.assertEqual(len(outputs) , len(self.tool.outputs))
| 370 | '''simple docstring'''
from __future__ import annotations
def p_series( nth_term : int | float | str , power : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
nth_term = int(nth_term )
power = int(power )
series : list[str] = []
for temp in range(int(nth_term ) ):
series.append(F"""1 / {pow(temp + 1 , int(power ) )}""" if series else '1' )
return series
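# Illustrative call (assumed values): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].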
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
def ohms_law( voltage : float , current : float , resistance : float ) -> dict[str, float]:
"""simple docstring"""
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
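# Illustrative calls (assumed values): ohms_law(voltage=10, current=5, resistance=0)
# returns {'resistance': 2.0}, and ohms_law(voltage=0, current=2, resistance=3)
# returns {'voltage': 6.0}.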
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 | '''simple docstring'''
import torch
def main() -> None:
"""simple docstring"""
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
else:
num_gpus = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_funnel_fast"""] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_funnel"""] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_funnel"""] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 350 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
"""simple docstring"""
out = compute_mauve(
p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
| 345 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def setUp(self):
"""simple docstring"""
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
image_processor_map = {
'do_resize': True,
'size': {'height': 2_2_4, 'width': 2_2_4},
'do_center_crop': True,
'crop_size': {'height': 1_8, 'width': 1_8},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(image_processor_map , fp)
def get_tokenizer(self , **kwargs):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs)
def get_rust_tokenizer(self , **kwargs):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs)
def get_image_processor(self , **kwargs):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
def tearDown(self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""simple docstring"""
image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
"""simple docstring"""
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
processor_slow.save_pretrained(self.tmpdirname)
processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False)
processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
processor_fast.save_pretrained(self.tmpdirname)
processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , BertTokenizer)
self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor)
self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor)
def test_save_load_pretrained_additional_features(self):
"""simple docstring"""
processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)')
image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
processor = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=False)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , BertTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor)
def test_image_processor(self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input , return_tensors='np')
input_processor = processor(images=image_input , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def test_tokenizer(self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'Alexandra,T-shirt的价格是15便士。'
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def test_processor(self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'Alexandra,T-shirt的价格是15便士。'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok , decoded_processor)
def test_model_input_names(self):
"""simple docstring"""
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'Alexandra,T-shirt的价格是15便士。'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 351 | '''simple docstring'''
from __future__ import annotations
def find_max( nums : list[int | float] , left : int , right : int ) -> int | float:
"""simple docstring"""
if len(nums ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(nums )
or left < -len(nums )
or right >= len(nums )
or right < -len(nums )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
mid = (left + right) >> 1  # the middle
left_max = find_max(nums , left , mid )  # find max in range[left, mid]
right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
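# Illustrative trace (assumed input): find_max([1, 9, 4, 7], 0, 3) splits at mid = 1 and
# returns max(find_max(nums, 0, 1), find_max(nums, 2, 3)) = max(9, 7) = 9.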
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | 0 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env( key : str , default : bool = False ) -> int:
"""simple docstring"""
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""", default=False)
_run_remote_tests = parse_flag_from_env("""RUN_REMOTE""", default=False)
_run_local_tests = parse_flag_from_env("""RUN_LOCAL""", default=True)
_run_packaged_tests = parse_flag_from_env("""RUN_PACKAGED""", default=True)
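# Illustrative behaviour (assumed environment): with RUN_SLOW=yes exported,
# parse_flag_from_env('RUN_SLOW', default=False) returns 1, since strtobool maps
# "yes"/"true"/"1" to 1 and "no"/"false"/"0" to 0.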
# Compression
UpperCamelCase : Tuple = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
UpperCamelCase : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
UpperCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
UpperCamelCase : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
UpperCamelCase : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
UpperCamelCase : Optional[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
UpperCamelCase : Dict = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def require_faiss( test_case ):
"""simple docstring"""
try:
import faiss  # noqa
except ImportError:
test_case = unittest.skip('test requires faiss' )(test_case )
return test_case
def require_regex( test_case ):
"""simple docstring"""
try:
import regex  # noqa
except ImportError:
test_case = unittest.skip('test requires regex' )(test_case )
return test_case
def require_elasticsearch( test_case ):
"""simple docstring"""
try:
import elasticsearch  # noqa
except ImportError:
test_case = unittest.skip('test requires elasticsearch' )(test_case )
return test_case
def require_sqlalchemy( test_case ):
"""simple docstring"""
try:
import sqlalchemy  # noqa
except ImportError:
test_case = unittest.skip('test requires sqlalchemy' )(test_case )
return test_case
def require_torch( test_case ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
test_case = unittest.skip('test requires PyTorch' )(test_case )
return test_case
def require_tf( test_case ):
"""simple docstring"""
if not config.TF_AVAILABLE:
test_case = unittest.skip('test requires TensorFlow' )(test_case )
return test_case
def require_jax( test_case ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
test_case = unittest.skip('test requires JAX' )(test_case )
return test_case
def require_pil( test_case ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
test_case = unittest.skip('test requires Pillow' )(test_case )
return test_case
def require_transformers( test_case ):
"""simple docstring"""
try:
import transformers  # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(test_case )
else:
return test_case
def require_tiktoken( test_case ):
"""simple docstring"""
try:
import tiktoken  # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(test_case )
else:
return test_case
def require_spacy( test_case ):
"""simple docstring"""
try:
import spacy  # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(test_case )
else:
return test_case
def require_spacy_model( model ):
"""simple docstring"""
def _require_spacy_model(test_case ):
try:
import spacy  # noqa F401
spacy.load(model )
except ImportError:
return unittest.skip('test requires spacy' )(test_case )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
else:
return test_case
return _require_spacy_model
def require_pyspark( test_case ):
"""simple docstring"""
try:
import pyspark  # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(test_case )
else:
return test_case
def require_joblibspark( test_case ):
"""simple docstring"""
try:
import joblibspark  # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(test_case )
else:
return test_case
def slow( test_case ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip('test is slow' )(test_case )
return test_case
def local( test_case ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip('test is local' )(test_case )
return test_case
def packaged( test_case ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
test_case = unittest.skip('test is packaged' )(test_case )
return test_case
def remote( test_case ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
test_case = unittest.skip('test requires remote' )(test_case )
return test_case
def for_all_test_methods( *decorators ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(fn ) and name.startswith('test' ):
for decorator in decorators:
fn = decorator(fn )
setattr(cls , name , fn )
return cls
return decorate
class RequestWouldHangIndefinitelyError( Exception ):
"""simple docstring"""
pass
class OfflineSimulationMode( Enum ):
"""simple docstring"""
CONNECTION_FAILS = 0
CONNECTION_TIMES_OUT = 1
HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-1_6 ):
"""simple docstring"""
online_request = requests.Session().request
def timeout_request(session , method , url , **kwargs ):
# Change the url to an invalid url so that the connection hangs
invalid_url = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
kwargs['timeout'] = timeout
try:
return online_request(method , invalid_url , **kwargs )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
e.request.url = url
max_retry_error = e.args[0]
max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
e.args = (max_retry_error,)
raise
def raise_connection_error(session , prepared_request , **kwargs ):
raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , raise_connection_error ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , timeout_request ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def set_current_working_directory_to_temp_dir( *args , **kwargs ):
"""simple docstring"""
original_working_dir = str(Path().resolve() )
with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
try:
os.chdir(tmp_dir )
yield
finally:
os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
"""simple docstring"""
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
"""simple docstring"""
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal( rng_a , rng_b ):
"""simple docstring"""
return deepcopy(rng_a ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng_b ).integers(0 , 100 , 10 ).tolist()
def xfail_if_500_502_http_error( func ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(func , *args , **kwargs ):
try:
return func(*args , **kwargs )
except HTTPError as err:
if str(err ).startswith('500' ) or str(err ).startswith('502' ):
pytest.xfail(str(err ) )
raise err
return decorator.decorator(_wrapper , func )
class _RunOutput :
"""simple docstring"""
def __init__( self , returncode , stdout , stderr ):
"""simple docstring"""
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream( stream , callback ):
"""simple docstring"""
while True:
line = await stream.readline()
if line:
callback(line )
else:
break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('\nRunning: ' , ' '.join(cmd ) )
p = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line , sink , pipe , label="" ):
line = line.decode('utf-8' ).rstrip()
sink.append(line )
if not quiet:
print(label , line , file=pipe )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:' ) ),
] , timeout=timeout , )
return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
"""simple docstring"""
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
cmd_str = ' '.join(cmd )
if result.returncode > 0:
stderr = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
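# Illustrative call (assumed command): execute_subprocess_async(['python', '-c', 'print(1)'])
# returns a _RunOutput whose .stdout list holds the decoded line '1'; a non-zero
# returncode raises RuntimeError with the collected stderr.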
def pytest_xdist_worker_id() -> int:
"""simple docstring"""
worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
worker = re.sub(R'^gw' , '' , worker , 0 , re.M )
return int(worker )
def get_torch_dist_unique_port() -> int:
"""simple docstring"""
port = 29_500
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
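# Illustrative example (assumed worker id): under pytest-xdist worker "gw3",
# pytest_xdist_worker_id() returns 3 and get_torch_dist_unique_port() returns
# 29_500 + 3 = 29503, giving each xdist worker its own torch.distributed port.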
| 352 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ):
"""simple docstring"""
set_seed(42 )
model = RegressionModel()
ddp_model = deepcopy(model )
dset = RegressionDataset(length=num_samples )
dataloader = DataLoader(dset , batch_size=batch_size )
model.to(accelerator.device )
ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
return model, ddp_model, dataloader
def get_dataloader( accelerator : Accelerator , use_longest=False ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
dataset = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(examples ):
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
with accelerator.main_process_first():
tokenized_datasets = dataset.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
if use_longest:
return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ):
"""simple docstring"""
accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
dataloader = get_dataloader(accelerator , not dispatch_batches )
model = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ):
"""simple docstring"""
logits_and_targets = []
for batch in dataloader:
input, target = batch.values()
with torch.no_grad():
logit = model(input )
logit, target = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
logits, targs = [], []
for logit, targ in logits_and_targets:
logits.append(logit )
targs.append(targ )
logits, targs = torch.cat(logits ), torch.cat(targs )
return logits, targs
def test_torch_metrics( accelerator : Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
"""simple docstring"""
model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
logits, targs = generate_predictions(ddp_model , dataloader , accelerator )
assert (
len(logits ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits )}"""
def test_mrpc( dispatch_batches : bool = False , split_batches : bool = False ):
"""simple docstring"""
metric = evaluate.load('glue' , 'mrpc' )
setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
# First do baseline
model, dataloader, device = setup['no']
model.to(device )
model.eval()
for batch in dataloader:
batch.to(device )
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=preds , references=batch['labels'] )
baseline = metric.compute()
# Then do distributed
model, dataloader, device = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
references = batch['labels']
preds, references = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=preds , references=references )
distributed = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches, split_batches)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator, 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
accelerator.state._reset_state()
def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
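
# Note: this module is meant to be run through the Accelerate launcher so the
# distributed branches above are actually exercised; the file name and process
# count below are assumptions for the example, not defined by this module:
#
#   accelerate launch --num_processes 2 test_metrics.py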
import numpy as np
class UpperCamelCase :
"""simple docstring"""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arv12,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
    def arv12(self):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
    def gli(self):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
"""simple docstring"""
return (self.nir / self.green) - 1
    def ci_rededge(self):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
    def ci(self):
"""simple docstring"""
return (self.red - self.blue) / self.red
    def ctvi(self):
        """simple docstring"""
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
"""simple docstring"""
return self.nir - self.green
    def evi(self):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        """simple docstring"""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
    def gosavi(self, y=0.16):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def ivi(self, a=None, b=None):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
    def ipvi(self):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
"""simple docstring"""
return self.nir / self.red
    def mrvi(self):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g(self):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
    def ri(self):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        """simple docstring"""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
    def _if(self):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
"""simple docstring"""
return self.nir / self.red
    def tvi(self):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
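
# Illustrative usage sketch of the index class above; the 2x2 reflectance values
# are made up purely for demonstration (any equally-shaped numpy arrays work):
if __name__ == "__main__":
    demo_red = np.array([[0.20, 0.30], [0.25, 0.28]])
    demo_nir = np.array([[0.60, 0.65], [0.62, 0.70]])
    calc = UpperCamelCase(red=demo_red, nir=demo_nir)
    print(calc.ndvi())  # direct method call
    print(calc.calculation("NDVI", red=demo_red, nir=demo_nir))  # dispatch through the funcs dict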
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase(DiffusionPipeline):
    """simple docstring"""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """simple docstring"""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
"""simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """simple docstring"""
        # Only works with a deterministic (DDIM) scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output
return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
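
# Illustrative usage sketch (the checkpoint id below is an assumption for the
# example, not something this file defines):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]
#   sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]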
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCamelCase(TaskTemplate):
"""simple docstring"""
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"
@property
    def column_mapping(self) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
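
# Minimal usage sketch (the column name "content" is an assumption for illustration):
#
#   template = UpperCamelCase(text_column="content")
#   template.column_mapping  # -> {"content": "text"}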
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_interpolate_pos_encoding(self):
        """simple docstring"""
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        """simple docstring"""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """simple docstring"""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase(PipelineTool):
    """simple docstring"""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", label: str):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")
    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self, outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
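
# Illustrative usage sketch; instantiating the tool directly like this is an
# assumption for the example (tools are usually loaded through the agents API):
#
#   from PIL import Image
#
#   tool = UpperCamelCase()
#   mask = tool(image=Image.open("photo.png"), label="cat")
#   mask.save("cat_mask.png")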
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """simple docstring"""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
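
# Illustrative invocation of this script; every argument value below is an
# assumption for the example:
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo tokenized-codeparrot-train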
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """simple docstring"""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
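
# Quick illustration of the two-ended linear search above (toy data):
#
#   search([5, 8, 12, 17, 23], 17)  # -> 3
#   search([5, 8, 12, 17, 23], 6)   # -> -1 (key absent)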
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """simple docstring"""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
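
# Why this works: XOR-ing two integers mixes their sign bits, so the result is
# negative exactly when the signs differ, e.g.:
#
#   different_signs(1, -1)  # True
#   different_signs(1, 1)   # False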
'''simple docstring'''
def prime_sieve_eratosthenes(num: int):
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
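
# For example, prime_sieve_eratosthenes(10) returns [2, 3, 5, 7]: multiples of 2
# (4, 6, 8, 10) and of 3 (9) are crossed out, and everything left in 2..10 is prime.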
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class UpperCamelCase(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
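
# Illustrative usage sketch; the toy array stands in for a real image, and the
# expected shape follows from the default 224x224 center crop:
#
#   import numpy as np
#
#   processor = UpperCamelCase()
#   dummy = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
#   batch = processor.preprocess(dummy, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)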
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
parser.add_argument(
        "--config_file", default=None, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command)
return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
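
# Example invocation once the CLI is installed (the config path is an assumption):
#
#   accelerate test --config_file path/to/default_config.yaml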
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
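
# The loop above implements the standard Newton-Raphson update
#
#     x_{n+1} = x_n - f(x_n) / f'(x_n)
#
# with sympy's `diff` supplying f' symbolically and `Decimal` carrying the extra
# precision needed to hit the requested tolerance.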
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_gelu_versions(self):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
"""simple docstring"""
get_activation('gelu')
get_activation('gelu_10')
get_activation('gelu_fast')
get_activation('gelu_new')
get_activation('gelu_python')
get_activation('gelu_pytorch_tanh')
get_activation('linear')
get_activation('mish')
get_activation('quick_gelu')
get_activation('relu')
get_activation('sigmoid')
get_activation('silu')
get_activation('swish')
get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        """simple docstring"""
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
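
# Minimal usage sketch of the registry exercised above:
#
#   import torch
#   from transformers.activations import get_activation
#
#   act = get_activation("gelu")
#   act(torch.tensor([-1.0, 0.0, 1.0]))  # tensor([-0.1587, 0.0000, 0.8413])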
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
(a,) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
(a,) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(a, a, a, a, a, a, a, a, a) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
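# Note on the reference output above (explanatory comment, not part of the original
# test): the upstream test calls `generate` with greedy decoding (`do_sample=False`),
# and the model keeps repeating the two-token prompt "the president" (ids 14, 447)
# until `max_length` is reached, which is why the expected list is ten copies of
# the prompt.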
| 345 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int=1_6 , UpperCAmelCase_ : List[str]=1_3 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : Optional[Any]=1_4 , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Dict=1_9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=1_6 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=[1, 2, 3, 4, 5] , UpperCAmelCase_ : Optional[int]=2_5 , UpperCAmelCase_ : Dict=5 , ):
"""simple docstring"""
a : Any = d_model
a : List[str] = parent
a : int = batch_size
a : List[Any] = prediction_length
a : Tuple = context_length
a : Optional[int] = cardinality
a : List[Any] = num_time_features
a : Tuple = lags_sequence
a : Union[str, Any] = embedding_dimension
a : Union[str, Any] = is_training
a : Optional[int] = hidden_size
a : Optional[int] = num_hidden_layers
a : Optional[int] = num_attention_heads
a : Dict = intermediate_size
a : Optional[int] = hidden_act
a : List[Any] = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Union[str, Any] = context_length
a : List[Any] = prediction_length + label_length
a : Optional[int] = label_length
a : List[Any] = moving_average
a : Optional[Any] = autocorrelation_factor
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[str] = config.context_length + max(config.lags_sequence)
a : str = ids_tensor([self.batch_size, 1] , config.cardinality[0])
a : str = floats_tensor([self.batch_size, _past_length, config.num_time_features])
a : List[str] = floats_tensor([self.batch_size, _past_length])
a : Dict = floats_tensor([self.batch_size, _past_length]) > 0.5
# decoder inputs
a : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
a : Optional[int] = floats_tensor([self.batch_size, config.prediction_length])
a : Any = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : List[str] = self.get_config()
a : int = self.prepare_autoformer_inputs_dict(UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Optional[int] = AutoformerModel(config=UpperCAmelCase_).to(UpperCAmelCase_).eval()
a : List[str] = model(**UpperCAmelCase_)
a : Tuple = outputs.encoder_last_hidden_state
a : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a : Tuple = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase_)
a : Any = AutoformerEncoder.from_pretrained(UpperCAmelCase_).to(UpperCAmelCase_)
a : Dict = model.create_network_inputs(**UpperCAmelCase_)
a : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
a : List[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a : Optional[Any] = encoder(inputs_embeds=UpperCAmelCase_)[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
a : Dict = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
.unsqueeze(1)
.repeat(1 , config.prediction_length , 1)
)
a : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a : List[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a : int = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a : Union[str, Any] = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase_)
a : str = AutoformerDecoder.from_pretrained(UpperCAmelCase_).to(UpperCAmelCase_)
a : Tuple = decoder(
trend=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
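# Summary of the standalone check above (explanatory comment): the decoder's
# seasonal stream is seeded with zeros over the prediction window and its trend
# stream with the per-series mean of the context window, mirroring Autoformer's
# series-decomposition scheme, and each reloaded submodule must reproduce the
# full model's hidden states to within 1e-3.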
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A : int = (AutoformerForPrediction,) if is_torch_available() else ()
A : Dict = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
A : Optional[Any] = False
A : Optional[int] = False
A : Optional[int] = False
A : Any = False
A : Optional[int] = False
A : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[Any] = AutoformerModelTester(self)
a : List[str] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a : Any = model_class(UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
a : Optional[Any] = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_)
self.assertEqual(info['missing_keys'] , [])
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_)
@unittest.skip(reason='Model has no tokens embeddings')
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = inspect.signature(getattr(AutoformerModel , 'forward'))
# The main input is the name of the argument after `self`
a : Union[str, Any] = list(model_signature.parameters.keys())[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[Any] = model_class(UpperCAmelCase_)
a : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Optional[Any] = [*signature.parameters.keys()]
a : str = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask')
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
])
self.assertListEqual(arg_names[: len(UpperCAmelCase_)] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
a : Union[str, Any] = getattr(self.model_tester , 'seq_length' , UpperCAmelCase_)
a : int = getattr(self.model_tester , 'decoder_seq_length' , UpperCAmelCase_)
a : Any = getattr(self.model_tester , 'encoder_seq_length' , UpperCAmelCase_)
a : str = getattr(self.model_tester , 'd_model' , UpperCAmelCase_)
a : List[str] = getattr(self.model_tester , 'num_attention_heads' , UpperCAmelCase_)
a : str = d_model // num_attention_heads
for model_class in self.all_model_classes:
a : str = True
a : Union[str, Any] = False
a : List[Any] = True
a : List[str] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : Optional[int] = True
a : Optional[int] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : List[str] = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a : Optional[Any] = len(UpperCAmelCase_)
a : Any = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
# decoder attentions
a : Any = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase_ , (list, tuple))
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a : List[str] = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase_ , (list, tuple))
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a : Union[str, Any] = True
a : List[str] = True
a : Any = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
self.assertEqual(out_len + 2 , len(UpperCAmelCase_))
a : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str]="train-batch.pt" ) -> Optional[int]:
"""simple docstring"""
a : str = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=snake_case , repo_type='dataset' )
a : List[Any] = torch.load(snake_case , map_location=snake_case )
return batch
@require_torch
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(UpperCAmelCase_)
a : Dict = prepare_batch()
with torch.no_grad():
a : Union[str, Any] = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
a : Optional[int] = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size))
self.assertEqual(output.shape , UpperCAmelCase_)
a : Tuple = torch.tensor(
[[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] , device=UpperCAmelCase_)
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Any = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(UpperCAmelCase_)
a : str = prepare_batch('val-batch.pt')
with torch.no_grad():
a : Optional[Any] = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
a : Optional[int] = torch.Size((6_4, model.config.context_length, model.config.d_model))
self.assertEqual(output.shape , UpperCAmelCase_)
a : List[str] = torch.tensor(
[[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] , device=UpperCAmelCase_)
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(UpperCAmelCase_)
a : Tuple = prepare_batch('val-batch.pt')
with torch.no_grad():
a : Optional[Any] = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
a : str = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length))
self.assertEqual(outputs.sequences.shape , UpperCAmelCase_)
a : Tuple = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=UpperCAmelCase_)
a : List[str] = outputs.sequences.mean(dim=1)
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase_ , rtol=1e-1))
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
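# Size of the alphabet used by the polynomial hash (extended ASCII)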
UpperCamelCase : List[Any] = 256
# Modulus to hash a string
UpperCamelCase : List[Any] = 1_000_003
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> bool:
"""simple docstring"""
a : Dict = len(snake_case )
a : Dict = len(snake_case )
if p_len > t_len:
return False
a : int = 0
a : int = 0
a : Any = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case ):
a : int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
a : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
a : Dict = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash of the next window (see https://en.wikipedia.org/wiki/Rolling_hash)
a : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
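# A minimal worked example of the rolling-hash update used above (the helper name
# below is illustrative, not part of the original module): moving the window from
# "ab" to "bc" drops ord("a") * modulus_power, shifts by `alphabet_size`, and
# appends ord("c").
def _rolling_hash_demo() -> None:
    alphabet_size, modulus = 256, 1_000_003
    # hash("ab") as built by the initialisation loop above
    h_ab = (ord('a') * alphabet_size + ord('b')) % modulus
    # rolling update to hash("bc"); for a 2-char pattern, modulus_power == alphabet_size
    h_bc = ((h_ab - ord('a') * alphabet_size) * alphabet_size + ord('c')) % modulus
    assert h_bc == (ord('b') * alphabet_size + ord('c')) % modulus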
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
# Test 1)
a : Union[str, Any] = 'abc1abc12'
a : Union[str, Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
a : List[Any] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(snake_case , snake_case ) and not rabin_karp(snake_case , snake_case )
# Test 2)
a : Optional[int] = 'ABABX'
a : Optional[int] = 'ABABZABABYABABX'
assert rabin_karp(snake_case , snake_case )
# Test 3)
a : List[Any] = 'AAAB'
a : int = 'ABAAAAAB'
assert rabin_karp(snake_case , snake_case )
# Test 4)
a : int = 'abcdabcy'
a : List[str] = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(snake_case , snake_case )
# Test 5)
a : int = 'Lü'
a : Tuple = 'Lüsai'
assert rabin_karp(snake_case , snake_case )
a : str = 'Lue'
assert not rabin_karp(snake_case , snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(snake_case , int ):
return 0
elif n == 2:
return 1
else:
a : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> int:
"""simple docstring"""
a : Dict = 0
a : List[Any] = 2
while digits < n:
index += 1
a : str = len(str(fibonacci(snake_case ) ) )
return index
def SCREAMING_SNAKE_CASE__ ( snake_case : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(snake_case )
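# A minimal worked example of the digit search above (using the call-site names
# from this module): the first 2-digit Fibonacci number is F(7) = 13 and the
# first 3-digit one is F(12) = 144, so:
#
#     fibonacci_digits_index(2)  # -> 7
#     fibonacci_digits_index(3)  # -> 12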
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 363 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
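# Illustrative note: with the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)` this
# property evaluates to 5 * 2**6 = 320, i.e. the convolutional feature extractor
# downsamples a raw 16 kHz waveform to roughly 50 frames per second.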
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , ):
"""simple docstring"""
a : Optional[int] = parent
a : Dict = 1_3
a : int = 7
a : Optional[int] = True
a : Tuple = True
a : Optional[Any] = True
a : Optional[int] = 9_9
a : Tuple = 3_2
a : Any = 2
a : Optional[int] = 4
a : str = 3_7
a : str = 'gelu'
a : Any = 0.1
a : List[str] = 0.1
a : Optional[int] = 5_1_2
a : Union[str, Any] = 1_6
a : Optional[Any] = 2
a : Optional[Any] = 0.02
a : Dict = 3
a : Optional[int] = 4
a : Tuple = None
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Union[str, Any] = None
if self.use_input_mask:
a : Tuple = random_attention_mask([self.batch_size, self.seq_length])
a : int = None
a : List[str] = None
a : int = None
if self.use_labels:
a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : int = ids_tensor([self.batch_size] , self.num_choices)
a : Dict = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = self.prepare_config_and_inputs()
a : str = True
a : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str):
"""simple docstring"""
a : Optional[Any] = TFEsmModel(config=UpperCAmelCase_)
a : int = {'input_ids': input_ids, 'attention_mask': input_mask}
a : Dict = model(UpperCAmelCase_)
a : Union[str, Any] = [input_ids, input_mask]
a : Any = model(UpperCAmelCase_)
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Tuple = True
a : Optional[Any] = TFEsmModel(config=UpperCAmelCase_)
a : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
a : Tuple = model(UpperCAmelCase_)
a : Union[str, Any] = [input_ids, input_mask]
a : str = model(UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_)
# Also check the case where encoder outputs are not passed
a : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Optional[int] = TFEsmForMaskedLM(config=UpperCAmelCase_)
a : List[Any] = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Dict = self.num_labels
a : Dict = TFEsmForTokenClassification(config=UpperCAmelCase_)
a : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Any = self.prepare_config_and_inputs()
a : str = config_and_inputs
a : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
A : Tuple = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Dict = False
A : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = TFEsmModelTester(self)
a : int = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : str = TFEsmModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skip('Protein models do not support embedding resizing.')
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.')
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(UpperCAmelCase_)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
a : Union[str, Any] = model.get_bias()
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
for k, v in name.items():
assert isinstance(UpperCAmelCase_ , tf.Variable)
else:
a : int = model.get_output_embeddings()
assert x is None
a : Tuple = model.get_bias()
assert name is None
@require_tf
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
a : Dict = tf.constant([[0, 1, 2, 3, 4, 5]])
a : List[str] = model(UpperCAmelCase_)[0]
a : Union[str, Any] = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape) , UpperCAmelCase_)
# compare the actual values for a slice.
a : Any = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Union[str, Any] = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
a : Tuple = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
a : Any = model(UpperCAmelCase_)[0]
# compare the actual values for a slice.
a : Dict = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 364 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
UpperCamelCase : Optional[int] = get_logger(__name__)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
a : Dict = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
a : str = module._original_module if isinstance(UpperCAmelCase_ , _PatchedModuleObj) else module
class UpperCamelCase :
"""simple docstring"""
A : List[str] = []
def __init__( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple=None):
"""simple docstring"""
a : Union[str, Any] = obj
a : Dict = target
a : List[Any] = new
a : List[Any] = target.split('.')[0]
a : Dict = {}
a : Union[str, Any] = attrs or []
def __enter__( self : str):
"""simple docstring"""
a : Optional[int] = self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCAmelCase_)):
try:
a : List[Any] = import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a : int = getattr(self.obj , UpperCAmelCase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCAmelCase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a : List[str] = obj_attr
# patch at top level
setattr(self.obj , UpperCAmelCase_ , _PatchedModuleObj(UpperCAmelCase_ , attrs=self.attrs))
a : Union[str, Any] = getattr(self.obj , UpperCAmelCase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , _PatchedModuleObj(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , attrs=self.attrs))
a : Optional[int] = getattr(UpperCAmelCase_ , UpperCAmelCase_)
# finally set the target attribute
setattr(UpperCAmelCase_ , UpperCAmelCase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a : Tuple = getattr(import_module('.'.join(UpperCAmelCase_)) , UpperCAmelCase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCAmelCase_) is attr_value:
a : List[Any] = getattr(self.obj , UpperCAmelCase_)
setattr(self.obj , UpperCAmelCase_ , self.new)
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a : Any = globals()['__builtins__'][target_attr]
setattr(self.obj , UpperCAmelCase_ , self.new)
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self : Optional[int] , *UpperCAmelCase_ : List[str]):
"""simple docstring"""
for attr in list(self.original):
setattr(self.obj , UpperCAmelCase_ , self.original.pop(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
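# A minimal usage sketch of the patcher above (module and function names here are
# hypothetical; in `datasets` this class is `patch_submodule`):
#
#     import some_module  # a module that does `from os.path import join as pjoin`
#     with patch_submodule(some_module, "os.path.join", mocked_join):
#         some_module.load()  # sees `mocked_join` for both `os.path.join` and `pjoin`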
| 365 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
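# Usage sketch (hedged; this wrapper mirrors the multi-token textual-inversion
# tokenizer from the diffusers examples): registering a 4-vector placeholder adds
# the tokens `<cat-toy>_0` ... `<cat-toy>_3`, and any prompt containing `<cat-toy>`
# is expanded to that sequence before encoding.
#
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     tokenizer("a photo of <cat-toy>")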
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase : str = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[Any] = "ernie_m"
A : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : str , UpperCAmelCase_ : int = 2_5_0_0_0_2 , UpperCAmelCase_ : int = 7_6_8 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 3_0_7_2 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 5_1_4 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 1e-05 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=0.0 , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : Dict = hidden_size
a : Optional[int] = num_hidden_layers
a : Any = num_attention_heads
a : Tuple = intermediate_size
a : Union[str, Any] = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Union[str, Any] = max_position_embeddings
a : Dict = initializer_range
a : Optional[Any] = layer_norm_eps
a : Any = classifier_dropout
a : List[str] = is_decoder
a : Dict = act_dropout
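# A rough sketch of what the attribute_map above buys you: reading
# `cfg.dropout` transparently resolves to `cfg.classifier_dropout`. The real
# remapping lives in PretrainedConfig; this TinyConfig only imitates the read
# path via __getattr__ (names here are illustrative):
class TinyConfig:
    attribute_map = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}
    def __init__(self, classifier_dropout=0.1, num_labels=2):
        self.classifier_dropout = classifier_dropout
        self.num_labels = num_labels
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. for aliases.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
cfg = TinyConfig()
print(cfg.dropout, cfg.num_classes)  # 0.1 2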
| 366 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
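# The subtle part of convert_state_dict above is splitting timm's fused qkv
# projection into separate query/key/value tensors. A standalone sketch with
# random weights (`dim` stands in for all_head_size; shapes are illustrative):
import torch
dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # timm stacks q, k, v row-wise
qkv_bias = torch.randn(3 * dim)
query_w, key_w, value_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
query_b, key_b, value_b = qkv_bias[:dim], qkv_bias[dim : dim * 2], qkv_bias[-dim:]
# Re-concatenating recovers the fused tensors exactly, confirming the slices.
assert torch.equal(torch.cat([query_w, key_w, value_w]), qkv_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b]), qkv_bias)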
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
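# The _import_structure/_LazyModule pattern above defers heavy imports until an
# attribute is first touched. A rough standalone sketch of the idea (this is a
# simplification for illustration, not _LazyModule's real implementation):
import importlib
class TinyLazyModule:
    def __init__(self, import_structure):
        # e.g. {"math": ["sqrt"], "json": ["dumps"]}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name):
        module_name = self._attr_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so the import runs only once
        return value
lazy = TinyLazyModule({'math': ['sqrt'], 'json': ['dumps']})
print(lazy.sqrt(16.0))  # 4.0 -- `math` was only imported at this access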
| 367 | '''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
                a : Union[str, Any] = base64.urlsafe_b64decode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
        if self.is_zero2() or self.is_zero3():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
        config , ds_key = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
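# find_config_node/get_value above walk a nested dict with dotted keys such as
# "zero_optimization.stage". A standalone sketch of that traversal (the config
# dict below is a made-up fragment, not a full DeepSpeed config):
def get_value(config: dict, ds_key_long: str, default=None):
    nodes = ds_key_long.split('.')
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default  # an intermediate node is missing
    return config.get(leaf, default)
ds_config = {'zero_optimization': {'stage': 3, 'offload_param': {'device': 'cpu'}}}
print(get_value(ds_config, 'zero_optimization.stage'))                 # 3
print(get_value(ds_config, 'zero_optimization.offload_param.device'))  # cpu
print(get_value(ds_config, 'zero_optimization.missing', default=-1))   # -1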
| 345 | 0 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]=1_3 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=9_9 , UpperCAmelCase_ : List[Any]=6_4 , UpperCAmelCase_ : Any=3_2 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Optional[int]=3_7 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=5_1_2 , UpperCAmelCase_ : Union[str, Any]=1_6 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Any=None , ):
"""simple docstring"""
a : str = parent
a : Any = batch_size
a : Optional[int] = seq_length
a : int = is_training
a : Union[str, Any] = use_input_mask
a : Optional[Any] = use_token_type_ids
a : Optional[Any] = use_labels
a : Optional[Any] = vocab_size
a : Optional[Any] = hidden_size
a : int = embedding_size
a : Dict = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = intermediate_size
a : str = hidden_act
a : str = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Any = type_vocab_size
a : Optional[int] = type_sequence_label_size
a : Optional[int] = initializer_range
a : Optional[int] = num_labels
a : int = num_choices
a : Optional[int] = scope
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Tuple = None
if self.use_input_mask:
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_token_type_ids:
a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a : Union[str, Any] = None
a : Any = None
a : Union[str, Any] = None
if self.use_labels:
a : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
a : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = MobileBertModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
a : List[str] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = MobileBertForMaskedLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Optional[int] = MobileBertForNextSentencePrediction(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : str = MobileBertForPreTraining(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , next_sentence_label=UpperCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = MobileBertForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : str = self.num_labels
a : Tuple = MobileBertForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : str = self.num_labels
a : Union[str, Any] = MobileBertForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = self.num_choices
a : int = MobileBertForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : str = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Tuple = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Tuple = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A : Any = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Optional[int] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class in get_values(UpperCAmelCase_):
a : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase_)
a : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : List[str] = MobileBertModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return torch.tensor(
snake_case , dtype=torch.long , device=snake_case , )
UpperCamelCase : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(UpperCAmelCase_)
a : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
with torch.no_grad():
a : str = model(UpperCAmelCase_)[0]
a : str = torch.Size((1, 9, 5_1_2))
self.assertEqual(output.shape , UpperCAmelCase_)
a : Tuple = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
] , device=UpperCAmelCase_ , )
        # MobileBERT results range from roughly 1e0 to 1e8. Even a 0.0000001% difference on a value of 1e8 yields an
        # absolute difference of ~1, so an additive tolerance is a poor fit here.
        # Instead, we divide the expected result by the actual result to obtain a ratio of ~1, and check that the
        # ratio stays within bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
a : Dict = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
a : List[Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
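# The bound check above compares expected/actual as a ratio rather than an
# absolute difference, because the activations span many orders of magnitude.
# A standalone sketch of the same check (tensor values below are made up):
import torch
def ratio_close(expected: torch.Tensor, actual: torch.Tensor, tol: float = 1e-3) -> bool:
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))
expected = torch.tensor([1.0e8, -5.0e-1, 2.6])
actual = expected * (1 + 5e-4)  # off by 0.05% everywhere -> still "close"
print(ratio_close(expected, actual))  # True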
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
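# The dataset above wraps its feature cache in a FileLock so only the first
# process (e.g. rank 0 in distributed training) featurizes, while the others
# block on the lock and then read the finished cache. A minimal sketch of that
# pattern -- the path and the stand-in "featurization" are illustrative:
import os
import torch
from filelock import FileLock
def load_or_build_features(cache_file: str):
    with FileLock(cache_file + '.lock'):
        if os.path.exists(cache_file):
            return torch.load(cache_file)  # another process already built it
        features = [1, 2, 3]  # stand-in for the expensive conversion step
        torch.save(features, cache_file)
        return features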
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __lt__( self : str , UpperCAmelCase_ : Dict):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : Optional[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self[-1] == other[-1]
def SCREAMING_SNAKE_CASE__ ( snake_case : list ) -> list:
"""simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to combine the stacks efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
UpperCamelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase : Dict = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
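# A worked trace of the stack construction above on a small input: bisect_left
# picks the leftmost stack whose top is >= the element (Stack compares by its
# last item), otherwise a new stack is started; merging the reversed stacks
# then yields the sorted list.
#   5 -> [[5]]
#   1 -> [[5, 1]]
#   4 -> [[5, 1], [4]]
#   3 -> [[5, 1], [4, 3]]
#   2 -> [[5, 1], [4, 3, 2]]
#   merge([1, 5], [2, 3, 4]) -> [1, 2, 3, 4, 5]
print(patience_sort([5, 1, 4, 3, 2]))  # [1, 2, 3, 4, 5]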
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
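# The preprocess pipeline above applies resize -> center-crop -> rescale ->
# normalize image by image over numpy arrays. A compressed sketch of just the
# rescale and normalize steps (random channels-first input; the 0.5 mean/std
# mirror IMAGENET_STANDARD_MEAN/IMAGENET_STANDARD_STD):
import numpy as np
image = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.float32)
rescaled = image * (1 / 255)  # do_rescale with rescale_factor = 1/255
mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
normalized = (rescaled - mean) / std  # values now lie in [-1, 1]
print(normalized.shape, normalized.min() >= -1.0, normalized.max() <= 1.0)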
| 345 | 0 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase : int = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCamelCase : List[str] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[Any] = "mask2former"
A : str = ["swin"]
A : List[Any] = {"hidden_size": "hidden_dim"}
def __init__( self : Any , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 1_0_2_4 , UpperCAmelCase_ : str = "relu" , UpperCAmelCase_ : int = 6 , UpperCAmelCase_ : int = 1_0 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 2_0_4_8 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : int = 2_5_5 , UpperCAmelCase_ : int = 1_0_0 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : int = 1_2_5_4_4 , UpperCAmelCase_ : float = 3.0 , UpperCAmelCase_ : float = 0.75 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : List[int] = [4, 8, 1_6, 3_2] , UpperCAmelCase_ : bool = None , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
a : Any = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCAmelCase_ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = backbone_config.pop('model_type')
a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
a : Union[str, Any] = config_class.from_dict(UpperCAmelCase_)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported)}""")
a : Optional[int] = backbone_config
a : Dict = feature_size
a : Union[str, Any] = mask_feature_size
a : Tuple = hidden_dim
a : int = encoder_feedforward_dim
a : List[Any] = activation_function
a : Dict = encoder_layers
a : int = decoder_layers
a : Any = num_attention_heads
a : Union[str, Any] = dropout
a : Tuple = dim_feedforward
a : List[Any] = pre_norm
a : List[Any] = enforce_input_projection
a : Tuple = common_stride
a : Union[str, Any] = ignore_value
a : Tuple = num_queries
a : Any = no_object_weight
a : int = class_weight
a : Dict = mask_weight
a : Union[str, Any] = dice_weight
a : Tuple = train_num_points
a : Any = oversample_ratio
a : Union[str, Any] = importance_sample_ratio
a : Dict = init_std
a : List[str] = init_xavier_std
a : str = use_auxiliary_loss
a : str = feature_strides
a : Optional[int] = output_auxiliary_logits
a : Tuple = decoder_layers
super().__init__(**UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return cls(
backbone_config=UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = copy.deepcopy(self.__dict__)
a : Optional[Any] = self.backbone_config.to_dict()
a : Tuple = self.__class__.model_type
return output
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
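# A worked example of the P-Series formatting above, assuming the positional
# call used in the __main__ block (nth_term first, then power): for nth_term=3
# and power=2 the series 1 + 1/2^2 + 1/3^2 is rendered term by term.
print(p_series(3, 2))  # ['1', '1 / 4', '1 / 9']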
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : List[Any] = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> int:
"""simple docstring"""
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , snake_case ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
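# The ugly-number sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (only
# prime factors 2, 3 and 5). Two spot checks against the three-pointer merge
# above:
print(ugly_numbers(1))   # 1
print(ugly_numbers(10))  # 12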
| 350 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
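# MAUVE's divergence frontier is assembled from KL divergences between the two
# quantized text distributions (the p_hist/q_hist in the returned object). A
# toy sketch of that single building block -- not the mauve library's code, and
# the histograms below are made up:
import numpy as np
def kl_divergence(p: np.ndarray, q: np.ndarray) -> float:
    mask = p > 0  # terms with p == 0 contribute nothing to KL(p || q)
    return float(np.sum(p[mask] * np.log(p[mask] / q[mask])))
p_hist = np.array([0.7, 0.2, 0.1])
q_hist = np.array([0.5, 0.3, 0.2])
print(kl_divergence(p_hist, q_hist))  # > 0; equals 0 only when p == q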
| 345 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "deit"
def __init__( self : Any , UpperCAmelCase_ : Optional[int]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Dict=1e-12 , UpperCAmelCase_ : str=2_2_4 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=1_6 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[Any] = hidden_size
a : Union[str, Any] = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : Union[str, Any] = intermediate_size
a : Optional[Any] = hidden_act
a : str = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : Dict = initializer_range
a : Optional[Any] = layer_norm_eps
a : Dict = image_size
a : int = patch_size
a : List[Any] = num_channels
a : Optional[int] = qkv_bias
a : str = encoder_stride
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return 1e-4
| 351 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float:
"""simple docstring"""
if len(snake_case ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case )
or left < -len(snake_case )
or right >= len(snake_case )
or right < -len(snake_case )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
a : Union[str, Any] = (left + right) >> 1 # the middle
a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid]
a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
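# A minimal standalone sketch of the same divide-and-conquer recurrence
# (illustrative name, simplified: no empty-list or negative-index handling):
# split the inclusive range at its midpoint and keep the larger half-maximum.
def _find_max_sketch(nums: list[int | float], left: int, right: int) -> int | float:
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # midpoint of the inclusive range
    return max(_find_max_sketch(nums, left, mid), _find_max_sketch(nums, mid + 1, right))

assert _find_max_sketch([2, 9, 4, 7], 0, 3) == 9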
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase : Union[str, Any] = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : int ) -> Any:
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
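# Illustration (not part of the test): the helper above is a 1% relative
# tolerance check, e.g. abs(2_360_000 - 2_351_563) / 2_351_563 ~ 0.0036 < 0.01.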
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
a : Union[str, Any] = _TestCommandArgs(dataset=snake_case , all_configs=snake_case , save_infos=snake_case )
a : Dict = TestCommand(*snake_case )
test_command.run()
a : str = os.path.join(snake_case , 'README.md' )
assert os.path.exists(snake_case )
a : Dict = DatasetInfosDict.from_directory(snake_case )
a : str = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_351_563,
'num_examples': 10_000,
},
{
'name': 'validation',
'num_bytes': 238_418,
'num_examples': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        a , a : Any = getattr(dataset_infos['default'] , snake_case ), getattr(expected_dataset_infos['default'] , snake_case )
if key == "num_bytes":
assert is_apercent_close(snake_case , snake_case )
elif key == "splits":
assert list(snake_case ) == list(snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 352 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
    a : Dict = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            a : List[Any] = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
    test_torch_metrics(accelerator , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase : Optional[int] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase : Tuple = """cuda""" if torch.cuda.is_available() else """cpu"""
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Any=100 , snake_case : Tuple=" " ) -> List[str]:
"""simple docstring"""
a : str = text.split(snake_case )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(snake_case ) , snake_case )]
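# A quick standalone illustration of the slicing above (hypothetical inputs):
# splitting on the separator and regrouping every n words yields stripped,
# non-overlapping passages, with a shorter final chunk when the text runs out.
_words = 'a b c d e'.split(' ')
assert [' '.join(_words[i : i + 2]).strip() for i in range(0 , len(_words) , 2)] == ['a b', 'c d', 'e']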
def SCREAMING_SNAKE_CASE__ ( snake_case : dict ) -> dict:
"""simple docstring"""
    a , a : List[str] = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(snake_case ):
titles.append(title if title is not None else '' )
texts.append(snake_case )
return {"title": titles, "text": texts}
def SCREAMING_SNAKE_CASE__ ( snake_case : dict , snake_case : DPRContextEncoder , snake_case : DPRContextEncoderTokenizerFast ) -> dict:
"""simple docstring"""
a : Optional[Any] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=snake_case , padding='longest' , return_tensors='pt' )['input_ids']
a : List[str] = ctx_encoder(input_ids.to(device=snake_case ) , return_dict=snake_case ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def SCREAMING_SNAKE_CASE__ ( snake_case : "RagExampleArguments" , snake_case : "ProcessingArguments" , snake_case : "IndexHnswArguments" , ) -> str:
"""simple docstring"""
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
a : Tuple = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
a : str = dataset.map(snake_case , batched=snake_case , num_proc=processing_args.num_proc )
# And compute the embeddings
a : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case )
a : int = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
a : Union[str, Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
a : List[str] = dataset.map(
partial(snake_case , ctx_encoder=snake_case , ctx_tokenizer=snake_case ) , batched=snake_case , batch_size=processing_args.batch_size , features=snake_case , )
# And finally save your dataset
a : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(snake_case )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
a : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=snake_case )
# And save the index
a : Dict = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(snake_case )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
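# A minimal standalone sketch of the same HNSW pattern (illustrative sizes,
# not the script's real dimension or link count): build an inner-product
# index over random float32 vectors and query its nearest neighbors.
def _hnsw_sketch():
    import numpy as np

    d, m = 8, 16  # vector dimension, HNSW neighbor links per node
    index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
    index.add(np.random.rand(32, d).astype('float32'))
    scores, ids = index.search(np.random.rand(1, d).astype('float32'), 4)
    assert ids.shape == (1, 4)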
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(
default=str(Path(a_ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A : Optional[str] = field(
default=a_ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A : Optional[str] = field(
default=str(Path(a_ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : Optional[int] = field(
default=a_ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase : Dict = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase : int = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase : Union[str, Any] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 353 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , UpperCAmelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
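    # Sanity note for the spherical interpolation above (illustrative, not
    # part of the pipeline): for orthogonal unit tensors theta = pi / 2, so
    # alpha = 0.5 yields sin(pi / 4) / sin(pi / 2) * (xa + xb), the unit-norm
    # midpoint on the great circle between the two endpoints.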
| 345 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : str = PegasusTokenizer
A : Dict = PegasusTokenizerFast
A : str = True
A : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a : Any = PegasusTokenizer(UpperCAmelCase_)
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large')
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = '</s>'
a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '</s>')
self.assertEqual(vocab_keys[-1] , 'v')
self.assertEqual(len(UpperCAmelCase_) , 1_1_0_3)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
a : int = self.tokenizer_class.from_pretrained(self.tmpdirname)
a : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
a : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_).input_ids[0]
a : Dict = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[int] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
a : str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
a : Tuple = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
a : str = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
a : List[str] = 'To ensure a smooth flow of bank resolutions.'
a : Any = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
a : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Any = ['This is going to be way too long.' * 1_5_0, 'short example']
a : str = ['not super long but more than 5 tokens', 'tiny']
a : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt')
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = PegasusTokenizer
A : Any = PegasusTokenizerFast
A : List[str] = True
A : str = True
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a : Union[str, Any] = PegasusTokenizer(UpperCAmelCase_ , offset=0 , mask_token_sent=UpperCAmelCase_ , mask_token='[MASK]')
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
a : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname)
a : Any = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
a : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_).input_ids[0]
a : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : str = ['This is going to be way too long.' * 1_0_0_0, 'short example']
a : int = ['not super long but more than 5 tokens', 'tiny']
a : List[Any] = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt')
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
a : Dict = self._large_tokenizer(UpperCAmelCase_).input_ids
self.assertListEqual(
UpperCAmelCase_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
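        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 226 including [CLS].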
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
        a , a , a : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
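            # e.g. shortest_edge=224 is scaled by 256 / 224 up to 256 here,
            # the standard resize ratio applied before the separate
            # center-crop step brings the image back down to crop_size.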
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 355 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 345 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=0.2 , UpperCAmelCase_ : Optional[int]=0.2):
"""simple docstring"""
a : int = bp_numa
a : int = bp_numa
a : List[Any] = bp_numa
a : int = conva_get[:2]
a : List[Any] = conva_get[2]
a : List[Any] = size_pa
a : Optional[Any] = rate_w
a : Tuple = rate_t
a : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : Dict = -2 * np.random.rand(self.conva[1]) + 1
a : List[str] = -2 * np.random.rand(self.num_bpa) + 1
a : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Tuple = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(UpperCAmelCase_ , 'wb') as f:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
print(f"""Model saved: {save_path}""")
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
with open(UpperCAmelCase_ , 'rb') as f:
a : str = pickle.load(UpperCAmelCase_) # noqa: S301
a : Any = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
a : Dict = model_dic.get('size_pooling1')
a : str = model_dic.get('num_bp1')
a : Union[str, Any] = model_dic.get('num_bp2')
a : str = model_dic.get('num_bp3')
a : List[str] = model_dic.get('rate_weight')
a : str = model_dic.get('rate_thre')
# create model instance
a : Optional[Any] = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# modify model parameter
a : Union[str, Any] = model_dic.get('w_conv1')
a : int = model_dic.get('wkj')
a : List[Any] = model_dic.get('vji')
a : List[Any] = model_dic.get('thre_conv1')
a : Optional[Any] = model_dic.get('thre_bp2')
a : int = model_dic.get('thre_bp3')
return conv_ins
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x))
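        # e.g. self.sig(0) == 0.5, and large positive or negative x saturates
        # toward 1 or 0: the logistic squashing used by both the convolution
        # feature maps and the fully connected layers below.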
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return round(UpperCAmelCase_ , 3)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = convs[0]
a : Tuple = convs[1]
a : Any = np.shape(UpperCAmelCase_)[0]
# get the data slice of original image data, data_focus
a : str = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
a : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_)
        # calculate the feature map of every single kernel, and save them as a list of matrices
a : Union[str, Any] = []
a : Optional[Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCAmelCase_):
a : Union[str, Any] = []
for i_focus in range(len(UpperCAmelCase_)):
a : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_))
a : str = np.asmatrix(UpperCAmelCase_).reshape(
UpperCAmelCase_ , UpperCAmelCase_)
data_featuremap.append(UpperCAmelCase_)
        # expand the data slices to one dimension
a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_))
a : Union[str, Any] = np.asarray(UpperCAmelCase_)
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple="average_pool"):
"""simple docstring"""
a : Dict = len(featuremaps[0])
a : Union[str, Any] = int(size_map / size_pooling)
a : Tuple = []
for i_map in range(len(UpperCAmelCase_)):
a : Dict = featuremaps[i_map]
a : str = []
for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_))
a : Any = np.asmatrix(UpperCAmelCase_).reshape(UpperCAmelCase_ , UpperCAmelCase_)
featuremap_pooled.append(UpperCAmelCase_)
return featuremap_pooled
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = []
for i in range(len(UpperCAmelCase_)):
a : int = np.shape(data[i])
a : int = data[i].reshape(1 , shapes[0] * shapes[1])
a : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_)
a : Optional[int] = np.asarray(UpperCAmelCase_)
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Any = np.asarray(UpperCAmelCase_)
a : List[Any] = np.shape(UpperCAmelCase_)
a : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Any = []
a : Optional[int] = 0
for i_map in range(UpperCAmelCase_):
a : Any = np.ones((size_map, size_map))
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = pd_pool[
i_pool
]
a : List[str] = i_pool + 1
a : Dict = np.multiply(
UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCAmelCase_)
return pd_all
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=bool):
"""simple docstring"""
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(UpperCAmelCase_)))
print((' - - Shape: Teach_Data ', np.shape(UpperCAmelCase_)))
a : Optional[Any] = 0
a : Dict = []
a : str = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
a : str = 0
print(f"""-------------Learning Time {rp}--------------""")
for p in range(len(UpperCAmelCase_)):
# print('------------Learning Image: %d--------------'%p)
a : str = np.asmatrix(datas_train[p])
a : int = np.asarray(datas_teach[p])
a : Dict = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Any = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : Dict = np.shape(UpperCAmelCase_)
a : List[Any] = self._expand(UpperCAmelCase_)
a : Union[str, Any] = data_bp_input
a : List[Any] = np.dot(UpperCAmelCase_ , self.vji.T) - self.thre_bpa
a : List[Any] = self.sig(UpperCAmelCase_)
a : str = np.dot(UpperCAmelCase_ , self.wkj.T) - self.thre_bpa
a : List[str] = self.sig(UpperCAmelCase_)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
a : int = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : Dict = np.multiply(
np.dot(UpperCAmelCase_ , self.wkj) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : List[str] = np.dot(UpperCAmelCase_ , self.vji)
a : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
a : Any = pd_conva_pooled.T.getA().tolist()
a : Dict = self._calculate_gradient_from_pool(
UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
a : Tuple = self._expand_mat(pd_conva_all[k_conv])
a : Any = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_)
a : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
a : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
a : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a : str = self.thre_bpa - pd_k_all * self.rate_thre
a : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error over every training image
a : Union[str, Any] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a : Union[str, Any] = rp + 1
a : Tuple = error_count / patterns
all_mse.append(UpperCAmelCase_)
def draw_error():
a : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCAmelCase_ , '+-')
plt.plot(UpperCAmelCase_ , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(UpperCAmelCase_ , alpha=0.5)
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(UpperCAmelCase_)))
for p in range(len(UpperCAmelCase_)):
a : int = np.asmatrix(datas_test[p])
a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Tuple = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : str = self._expand(UpperCAmelCase_)
a : str = data_bp_input
a : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
a : Union[str, Any] = self.sig(UpperCAmelCase_)
a : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
a : Tuple = self.sig(UpperCAmelCase_)
produce_out.extend(bp_outa.getA().tolist())
a : Optional[int] = [list(map(self.do_round , UpperCAmelCase_)) for each in produce_out]
return np.asarray(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = np.asmatrix(UpperCAmelCase_)
a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Union[str, Any] = self.pooling(UpperCAmelCase_ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
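    # Usage sketch (added, hedged): in the de-obfuscated source this class is a
    # small from-scratch CNN (one conv layer, average pooling, two BP layers).
    # The constructor/method names below are assumed originals, not the
    # placeholder names used in this dump, so the sketch stays commented out:
    #
    #     cnn = CNN(bp_num1=441, bp_num2=20, bp_num3=10,
    #               conv1_get=[3, 2, 1], size_p1=4, rate_w=0.2, rate_t=0.2)
    #     cnn.train(patterns, datas_train, datas_teach,
    #               n_repeat=100, error_accuracy=0.1, draw_e=True)
    #     predictions = cnn.predict(datas_test)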
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
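# A hedged sketch (added) of what the test above exercises: with the toy vocab
# and merges written in setUp(), CTRL's BPE keeps "adapt" whole but breaks
# "react" into continuation pieces. `vocab_file` / `merges_file` stand for the
# fixture paths created above:
#
#     tokenizer = CTRLTokenizer(vocab_file, merges_file, unk_token='<unk>')
#     print(tokenizer.tokenize('adapt react readapt apt'))
#     # ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']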
| 345 | 0 |
class UpperCamelCase :
"""simple docstring"""
    def __init__( self : Union[str, Any] , n : int):
        """simple docstring"""
        self.n = n  # fixed capacity of the ring buffer
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
def __len__( self : int):
"""simple docstring"""
return self.size
    def is_empty( self : str):
"""simple docstring"""
return self.size == 0
    def first( self : Optional[int]):
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
    def enqueue( self : Optional[int] , data : List[str]):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
self.size += 1
return self
    def dequeue( self : Any):
"""simple docstring"""
if self.size == 0:
raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
self.size -= 1
return temp
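# A short usage sketch (added). The class keeps this dump's placeholder name;
# only the method names were restored from their call sites above.
if __name__ == "__main__":
    q = UpperCamelCase(3)
    q.enqueue(10).enqueue(20)  # enqueue returns self, so calls can chain
    print(len(q))              # 2
    print(q.first())           # 10
    print(q.dequeue())         # 10
    print(q.dequeue())         # 20
    print(q.is_empty())        # True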
| 357 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( num1 : int , num2 : int ) -> bool:
    """simple docstring"""
    # True when the two integers have opposite signs: in two's complement,
    # XOR of two ints is negative exactly when their sign bits differ.
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
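    # Quick demonstration (added): XOR of two ints is negative exactly when
    # their sign bits differ.
    print(SCREAMING_SNAKE_CASE__(1, -1))  # True  - opposite signs
    print(SCREAMING_SNAKE_CASE__(1, 1))   # False - same sign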
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho( num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ):
"""simple docstring"""
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value : int , step : int , modulus : int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
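# Example (added): 8051 = 83 * 97, so pollard_rho(8051) returns one of the two
# nontrivial factors; which one you get depends on the seed/step pair that succeeds.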
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
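# Usage sketch (added, hedged): the method names in this dump are placeholders,
# so the assumed de-obfuscated API (an HF-style image processor whose
# preprocess() chains resize -> center-crop -> rescale -> normalize) is shown
# as comments only:
#
#     from PIL import Image
#     processor = ImageProcessor(size={'shortest_edge': 2_2_4})  # hypothetical name
#     inputs = processor.preprocess(Image.open('cat.png'), return_tensors='np')
#     print(inputs['pixel_values'].shape)  # e.g. (1, 3, 224, 224)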
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
class Node :
"""simple docstring"""
    def __init__( self : int , data : int):
        """simple docstring"""
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree : Node | None ) -> None: # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree : Node | None ) -> int:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main( ) -> None: # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.left.right.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
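# With the tree built in main(), node 3 has a left child but no right child,
# so is_full_binary_tree(tree) prints False and depth_of_tree(tree) prints 4.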
| 359 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (i.e. x = e)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 0 |
'''simple docstring'''
import math
class Graph :
"""simple docstring"""
    def __init__( self : str , n : Union[str, Any]=0): # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n)] for i in range(0 , n)
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self : Optional[Any] , u : int , v : Any , w : int):
        """simple docstring"""
        self.dp[u][v] = w  # the edge weight is written straight into the dp table
    def floyd_warshall( self : Dict):
"""simple docstring"""
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
    def show_min( self : List[Any] , u : Optional[int] , v : Any):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
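    # Expected output (added): the shortest 1 -> 4 path is 1 -> 3 -> 4 (5 + 6 = 11)
    # and the shortest 0 -> 3 path is 0 -> 2 -> 3 (9 + 7 = 16).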
| 360 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
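    # Example (added): recursive bubble sort of a small list.
    print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]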
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = ["input_features", "attention_mask"]
def __init__( self : Any , UpperCAmelCase_ : Dict=8_0 , UpperCAmelCase_ : List[Any]=1_6_0_0_0 , UpperCAmelCase_ : Optional[Any]=8_0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=True , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_)
a : Dict = num_mel_bins
a : List[str] = do_ceptral_normalize
a : Dict = normalize_means
a : int = normalize_vars
a : Any = True
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : np.ndarray , ):
"""simple docstring"""
a : str = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
a : str = torch.from_numpy(UpperCAmelCase_).unsqueeze(0)
a : str = ta_kaldi.fbank(UpperCAmelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : float = 0.0 , ):
"""simple docstring"""
if normalize_means:
a : int = x[:input_length].mean(axis=0)
a : Any = np.subtract(UpperCAmelCase_ , UpperCAmelCase_)
if normalize_vars:
a : List[str] = x[:input_length].std(axis=0)
a : Optional[int] = np.divide(UpperCAmelCase_ , UpperCAmelCase_)
if input_length < x.shape[0]:
a : List[str] = padding_value
# make sure array is in float32
        a : Optional[int] = x.astype(np.float32)
return x
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : Optional[np.ndarray] = None):
"""simple docstring"""
a : int = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase_ , UpperCAmelCase_ , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
def __call__( self : str , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
a : str = isinstance(UpperCAmelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
a : Tuple = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
            a : Optional[Any] = [np.asarray(UpperCAmelCase_ , dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray):
            a : Tuple = np.asarray(UpperCAmelCase_ , dtype=np.float32)
        elif isinstance(UpperCAmelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            a : str = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
a : List[Any] = [raw_speech]
# extract fbank features
a : Any = [self._extract_fbank_features(UpperCAmelCase_) for waveform in raw_speech]
# convert into correct format for padding
a : List[str] = BatchFeature({'input_features': features})
a : List[Any] = self.pad(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
# make sure list is in array format
a : Optional[int] = padded_inputs.get('input_features')
if isinstance(input_features[0] , UpperCAmelCase_):
            a : List[str] = [np.asarray(UpperCAmelCase_ , dtype=np.float32) for feature in input_features]
a : str = padded_inputs.get('attention_mask')
if attention_mask is not None:
            a : Tuple = [np.asarray(UpperCAmelCase_ , dtype=np.int32) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
a : Tuple = (
                np.array(UpperCAmelCase_ , dtype=np.int32)
if self._get_padding_strategies(UpperCAmelCase_ , max_length=UpperCAmelCase_) is not PaddingStrategy.DO_NOT_PAD
else None
)
a : Tuple = self.normalize(
padded_inputs['input_features'] , attention_mask=UpperCAmelCase_)
if return_tensors is not None:
a : int = padded_inputs.convert_to_tensors(UpperCAmelCase_)
return padded_inputs
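# Usage sketch (added, hedged): this class matches HF's Speech2Text-style
# feature extractor (Kaldi fbank + utterance-level CMVN), so the names below
# are assumptions from that correspondence and the sketch stays commented out:
#
#     import numpy as np
#     extractor = Speech2TextFeatureExtractor()        # assumed class; 80 mel bins, 16 kHz
#     speech = np.zeros(16000, dtype=np.float32)       # one second of audio
#     feats = extractor(speech, sampling_rate=16000, return_tensors='np')
#     print(feats['input_features'].shape)             # (1, n_frames, 80)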
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
UpperCamelCase : List[Any] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
UpperCamelCase : int = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
UpperCamelCase : Union[str, Any] = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , reference_urls=[] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
a : Union[str, Any] = np.array([re.sub(UpperCAmelCase_ , '' , UpperCAmelCase_) for x in predictions])
a : str = np.array([re.sub(UpperCAmelCase_ , '' , UpperCAmelCase_) for x in references])
else:
a : List[Any] = np.asarray(UpperCAmelCase_)
a : int = np.asarray(UpperCAmelCase_)
if ignore_case:
a : List[Any] = np.char.lower(UpperCAmelCase_)
a : Optional[int] = np.char.lower(UpperCAmelCase_)
if ignore_punctuation:
a : List[str] = string.punctuation.maketrans('' , '' , string.punctuation)
a : List[Any] = np.char.translate(UpperCAmelCase_ , table=UpperCAmelCase_)
a : Tuple = np.char.translate(UpperCAmelCase_ , table=UpperCAmelCase_)
if ignore_numbers:
a : List[Any] = string.digits.maketrans('' , '' , string.digits)
a : str = np.char.translate(UpperCAmelCase_ , table=UpperCAmelCase_)
a : str = np.char.translate(UpperCAmelCase_ , table=UpperCAmelCase_)
a : Optional[Any] = predictions == references
return {"exact_match": np.mean(UpperCAmelCase_) * 1_0_0} | 363 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
| 345 | 0 |
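# For reference: a de-obfuscated sketch of the exact-match metric implemented in the row
# above. The identifier names here are illustrative (the dataset masks them); the
# normalization order — regex stripping, lowercasing, punctuation, digits — follows the code.
import re
import string

import numpy as np


def exact_match(predictions, references, regexes_to_ignore=None,
                ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
    if regexes_to_ignore is not None:
        for pattern in regexes_to_ignore:
            predictions = [re.sub(pattern, '', x) for x in predictions]
            references = [re.sub(pattern, '', x) for x in references]
    predictions = np.asarray(predictions)
    references = np.asarray(references)
    if ignore_case:
        predictions = np.char.lower(predictions)
        references = np.char.lower(references)
    if ignore_punctuation:
        table = str.maketrans('', '', string.punctuation)
        predictions = np.char.translate(predictions, table=table)
        references = np.char.translate(references, table=table)
    if ignore_numbers:
        table = str.maketrans('', '', string.digits)
        predictions = np.char.translate(predictions, table=table)
        references = np.char.translate(references, table=table)
    # Elementwise string equality, averaged and scaled to a percentage.
    return {'exact_match': float(np.mean(predictions == references) * 100)}


print(exact_match(['cat?', 'theater'], ['the cat', 'theater']))  # {'exact_match': 50.0}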
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Optional[int] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 364 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 0 |
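# The row above is an import-structure file built around `_LazyModule`. A hypothetical
# minimal version of that deferred-import pattern (not the actual transformers class):
# attribute access triggers the submodule import, then caches the resolved symbol.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {symbol!r}')
        module = importlib.import_module('.' + self._symbol_to_module[symbol], self.__name__)
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ fires only once per symbol
        return value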
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Any = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
| 345 | 0 |
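# The tokenizer row above implements multi-vector placeholder tokens (textual inversion):
# one placeholder expands to several numbered sub-tokens that are spliced into the prompt.
# A standalone sketch of that expansion step, with illustrative names:
import random


def expand_placeholders(text, token_map, vector_shuffle=False, prop_tokens_to_load=1.0):
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
            if vector_shuffle:
                tokens = random.sample(tokens, k=len(tokens))
            text = text.replace(placeholder, ' '.join(tokens))
    return text


token_map = {'<cat-toy>': ['<cat-toy>_0', '<cat-toy>_1', '<cat-toy>_2']}
print(expand_placeholders('a photo of <cat-toy>', token_map))
# a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2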
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 366 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 0 |
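# The core trick in the Swin conversion above is splitting timm's fused `qkv` projection
# into separate query/key/value tensors. A sketch, assuming a fused weight of shape
# (3 * dim, dim) with q, k, v stacked in that order:
import torch


def split_qkv(fused_weight, fused_bias):
    dim = fused_weight.shape[0] // 3
    q_w, k_w, v_w = fused_weight[:dim, :], fused_weight[dim:2 * dim, :], fused_weight[-dim:, :]
    q_b, k_b, v_b = fused_bias[:dim], fused_bias[dim:2 * dim], fused_bias[-dim:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


w, b = torch.randn(3 * 96, 96), torch.randn(3 * 96)
(q_w, _), _, (v_w, _) = split_qkv(w, b)
assert q_w.shape == (96, 96) and torch.equal(v_w, w[-96:, :])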
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> Any:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(snake_case , int(b / 2 ) ) * actual_power(snake_case , int(b / 2 ) )
else:
return a * actual_power(snake_case , int(b / 2 ) ) * actual_power(snake_case , int(b / 2 ) )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(snake_case , snake_case )
return actual_power(snake_case , snake_case )
if __name__ == "__main__":
print(power(-2, -3))
| 367 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
| 345 | 0 |
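# The power snippet in this row is exponentiation by squaring. A cleaned-up version that
# computes the halved subproblem once (the original recurses twice per level) and folds
# the negative-exponent flip into the same function:
def fast_power(base: float, exponent: int) -> float:
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)  # reuse instead of recomputing
    return half * half if exponent % 2 == 0 else base * half * half


assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125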
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase : str = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | 0 |
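# Both GLUE dataset rows guard feature caching with an inter-process file lock, so only
# one worker builds the cache while the rest wait and then read it. A generic sketch of
# that pattern (pickle stands in for torch.save/torch.load here):
import os
import pickle

from filelock import FileLock


def cached_compute(cache_path, build_fn, overwrite=False):
    lock_path = cache_path + '.lock'
    with FileLock(lock_path):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, 'rb') as f:
                return pickle.load(f)
        features = build_fn()
        with open(cache_path, 'wb') as f:
            pickle.dump(features, f)
        return features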
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[Any] = 'ylacombe/bark-small'
a : Optional[Any] = tempfile.mkdtemp()
a : str = 'en_speaker_1'
a : List[str] = 'This is a test string'
a : Dict = 'speaker_embeddings_path.json'
a : Union[str, Any] = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.get_tokenizer()
a : Optional[int] = BarkProcessor(tokenizer=UpperCAmelCase_)
processor.save_pretrained(self.tmpdirname)
a : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a : Dict = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a : Tuple = 3_5
a : int = 2
a : Any = 8
a : Dict = {
'semantic_prompt': np.ones(UpperCAmelCase_),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
a : List[Any] = processor(text=self.input_string , voice_preset=UpperCAmelCase_)
a : Dict = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from npz file
a : Union[str, Any] = os.path.join(self.tmpdirname , 'file.npz')
np.savez(UpperCAmelCase_ , **UpperCAmelCase_)
a : Optional[int] = processor(text=self.input_string , voice_preset=UpperCAmelCase_)
a : int = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([])).tolist())
# test loading voice preset from the hub
a : Any = processor(text=self.input_string , voice_preset=self.voice_preset)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : int = self.get_tokenizer()
a : Union[str, Any] = BarkProcessor(tokenizer=UpperCAmelCase_)
a : Union[str, Any] = processor(text=self.input_string)
a : Tuple = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 345 | 0 |
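# The post-processing method at the end of the image-processor row upsamples the logits
# to each requested size and takes a per-pixel argmax. A standalone sketch:
import torch


def postprocess_segmentation(logits, target_sizes=None):
    # logits: (batch, num_labels, h, w) -> list of (H, W) label maps
    if target_sizes is None:
        return list(logits.argmax(dim=1))
    maps = []
    for i, size in enumerate(target_sizes):
        resized = torch.nn.functional.interpolate(
            logits[i].unsqueeze(0), size=size, mode='bilinear', align_corners=False)
        maps.append(resized[0].argmax(dim=0))
    return maps


logits = torch.randn(2, 21, 32, 32)
maps = postprocess_segmentation(logits, target_sizes=[(128, 128), (64, 96)])
assert maps[0].shape == (128, 128) and maps[1].shape == (64, 96)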
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
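# The p-series row builds the term strings of 1 + 1/2^p + 1/3^p + ... + 1/n^p. The same
# result as a one-line comprehension:
def p_series(nth_term: int, power: int) -> list[str]:
    return ['1' if k == 1 else f'1 / {k ** power}' for k in range(1, nth_term + 1)]


print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']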
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : list[list[int]] , snake_case : int , snake_case : int , snake_case : list[int] ) -> bool:
"""simple docstring"""
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def SCREAMING_SNAKE_CASE__ ( snake_case : list[list[int]] , snake_case : list[int] , snake_case : int ) -> bool:
"""simple docstring"""
# Base Case
if curr_ind == len(snake_case ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(snake_case ) ):
if valid_connection(snake_case , snake_case , snake_case , snake_case ):
# Insert current vertex into path as next transition
a : Dict = next_ver
# Validate created path
if util_hamilton_cycle(snake_case , snake_case , curr_ind + 1 ):
return True
# Backtrack
a : Any = -1
return False
def SCREAMING_SNAKE_CASE__ ( snake_case : list[list[int]] , snake_case : int = 0 ) -> list[int]:
"""simple docstring"""
a : Dict = [-1] * (len(snake_case ) + 1)
# initialize start and end of path with starting index
a : Optional[Any] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(snake_case , snake_case , 1 ) else []
| 371 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 0 |
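# Usage sketch for the Hamiltonian-cycle backtracking row, assuming the helpers keep
# their pre-masking names (`hamilton_cycle` as the entry point). The path array has
# n + 1 slots with start_index pinned at both ends; backtracking fills the middle.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]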
'''simple docstring'''
import re
from ..utils import cached_file
# docstyle-ignore
UpperCamelCase : int = """
Human: <<task>>
Assistant: """
UpperCamelCase : int = """huggingface-tools/default-prompts"""
UpperCamelCase : Dict = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : str , snake_case : List[Any]="run" ) -> Tuple:
"""simple docstring"""
if prompt_or_repo_id is None:
a : Optional[Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , snake_case ) is not None:
return prompt_or_repo_id
a : int = cached_file(
snake_case , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(snake_case , 'r' , encoding='utf-8' ) as f:
return f.read()
| 350 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 0 |
'''simple docstring'''
import math
import os
import sys
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : int = ''
try:
with open(snake_case , 'rb' ) as binary_file:
a : Union[str, Any] = binary_file.read()
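# concatenate each byte's zero-padded 8-bit binary representation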
for dat in data:
a : Optional[int] = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( snake_case : dict[str, str] , snake_case : str , snake_case : int , snake_case : str ) -> None:
"""simple docstring"""
lexicon.pop(snake_case )
a : str = last_match_id
if math.loga(snake_case ).is_integer():
for curr_key in lexicon:
a : Optional[Any] = '0' + lexicon[curr_key]
a : Optional[Any] = bin(snake_case )[2:]
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : List[Any] = {'0': '0', '1': '1'}
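# the lexicon maps variable-length bit strings to decoded ids and grows via add_key_to_lexicon as codes are read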
a , a : Optional[Any] = '', ''
a : Dict = len(snake_case )
for i in range(len(snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case )
index += 1
a : str = ''
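# any leftover bits are zero-padded until they form a known lexicon key (handles the final, truncated code)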
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a : Dict = lexicon[curr_string]
result += last_match_id
return result
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> str:
"""simple docstring"""
a : List[Any] = os.path.getsize(snake_case )
a : List[str] = bin(snake_case )[2:]
a : Tuple = len(snake_case )
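# header layout: (length_length - 1) leading zero bits, then the original file size in binary, then the payload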
return "0" * (length_length - 1) + file_length_binary + compressed
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> None:
"""simple docstring"""
a : Union[str, Any] = 8
try:
with open(snake_case , 'wb' ) as opened_file:
a : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case ) , snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> None:
"""simple docstring"""
a : List[Any] = read_file_binary(snake_case )
a : Optional[Any] = compress_data(snake_case )
a : str = add_file_length(snake_case , snake_case )
write_file_binary(snake_case , snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 351 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float:
"""simple docstring"""
if len(snake_case ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case )
or left < -len(snake_case )
or right >= len(snake_case )
or right < -len(snake_case )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
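# divide and conquer: recurse on each half and keep the larger of the two maxima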
a : Union[str, Any] = (left + right) >> 1 # the middle
a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid]
a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
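# e.g. find_max([1, 5, 3], 0, 2) evaluates to 5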
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Tuple=3_7 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : int="divided_space_time" , UpperCAmelCase_ : Optional[int]=None , ):
"""simple docstring"""
a : str = parent
a : Optional[Any] = batch_size
a : Dict = image_size
a : Optional[int] = num_channels
a : List[Any] = patch_size
a : List[Any] = num_frames
a : Optional[int] = is_training
a : Any = use_labels
a : Tuple = hidden_size
a : Tuple = num_hidden_layers
a : str = num_attention_heads
a : List[Any] = intermediate_size
a : Tuple = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Union[str, Any] = attention_type
a : Optional[Any] = initializer_range
a : str = scope
a : Dict = num_labels
# in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (for the CLS token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
a : int = None
if self.use_labels:
a : Tuple = ids_tensor([self.batch_size] , self.num_labels)
a : Optional[int] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Union[str, Any] = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a : List[Any] = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : List[Any] = TimesformerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Union[str, Any] = TimesformerForVideoClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
# verify the logits shape
a : Optional[int] = torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[Any] = self.prepare_config_and_inputs()
a : str = config_and_inputs
a : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
A : Optional[Any] = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Union[str, Any] = False
A : Tuple = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[Any] = TimesformerModelTester(self)
a : List[Any] = ConfigTester(
self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any]=False):
"""simple docstring"""
a : List[str] = copy.deepcopy(UpperCAmelCase_)
if return_labels:
if model_class in get_values(UpperCAmelCase_):
a : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Tuple = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[Any] = model_class(UpperCAmelCase_)
a : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Any = [*signature.parameters.keys()]
a : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = TimesformerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
if not self.has_attentions:
pass
else:
a : int = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Optional[Any] = self.model_tester.seq_length
a : List[str] = self.model_tester.num_frames
a : Dict = True
a : Optional[Any] = False
a : List[str] = True
a : Optional[int] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : Dict = True
a : List[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : List[str] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a : List[Any] = len(UpperCAmelCase_)
# Check attention is always last and order is fine
a : Union[str, Any] = True
a : Any = True
a : Optional[int] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
self.assertEqual(out_len + 1 , len(UpperCAmelCase_))
a : Any = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]):
a : Tuple = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : List[Any] = outputs.hidden_states
a : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : str = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[int] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : Dict = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a : Tuple = np.load(snake_case )
return list(snake_case )
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
UpperCAmelCase_)
a : Any = self.default_image_processor
a : Optional[Any] = prepare_video()
a : Any = image_processor(video[:8] , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[str] = model(**UpperCAmelCase_)
# verify the logits
a : str = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Tuple = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 352 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
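# build a small regression model and dataset, then let the Accelerator prepare the ddp model and dataloader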
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
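# run inference batch by batch, then gather logits and targets across all processes for metric computation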
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> Optional[int]:
"""simple docstring"""
a : Dict = R'\w+[.]\d+'
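# find sub-patterns like 'layers.0' so the dot before the index can be replaced with an underscore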
a : Optional[int] = re.findall(snake_case , snake_case )
for pat in pats:
a : List[str] = key.replace(snake_case , '_'.join(pat.split('.' ) ) )
return key
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
a : Tuple = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
a : int = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
a : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
a : List[str] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
a : Union[str, Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
a : int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
a : Union[str, Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
a : Dict = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
a : Dict = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
a : List[Any] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : str , snake_case : Union[str, Any]=42 ) -> List[str]:
"""simple docstring"""
a : List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
a : int = flax_model.init_weights(PRNGKey(snake_case ) )
a : List[str] = flatten_dict(snake_case )
a : Union[str, Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
a : Any = rename_key(snake_case )
a : Tuple = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
a : Optional[int] = rename_key_and_reshape_tensor(snake_case , snake_case , snake_case )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
a : Union[str, Any] = jnp.asarray(snake_case )
return unflatten_dict(snake_case )
| 353 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , UpperCAmelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
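# precompute the noised input at every remaining timestep so the masked regions can be re-imposed after each denoising step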
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
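# deterministic DDIM inversion: step through the noise schedule in reverse to recover the noise that generates this image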
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('List is empty' )
return sum(snake_case ) / len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
a , a , a : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 355 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 345 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "umt5"
A : Dict = ["past_key_values"]
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int]=2_5_0_1_1_2 , UpperCAmelCase_ : str=5_1_2 , UpperCAmelCase_ : Tuple=6_4 , UpperCAmelCase_ : Optional[Any]=1_0_2_4 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=6 , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : Union[str, Any]=1_2_8 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : List[Any]="gated-gelu" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]="T5Tokenizer" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : str=0 , **UpperCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=UpperCAmelCase_ , tokenizer_class=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : Union[str, Any] = vocab_size
a : Optional[int] = d_model
a : Any = d_kv
a : Optional[Any] = d_ff
a : str = num_layers
a : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a : Optional[int] = num_heads
a : int = relative_attention_num_buckets
a : Optional[int] = relative_attention_max_distance
a : List[str] = dropout_rate
a : List[Any] = layer_norm_epsilon
a : Optional[int] = initializer_factor
a : Union[str, Any] = feed_forward_proj
a : Optional[Any] = use_cache
a : Dict = self.feed_forward_proj.split('-')
a : Tuple = act_info[-1]
a : str = act_info[0] == 'gated'
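# e.g. 'gated-gelu' -> gated activation with dense act fn 'gelu'; a bare name like 'relu' is ungated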
if len(UpperCAmelCase_) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
a : List[str] = 'gelu_new'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return self.d_model
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return self.num_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.num_layers
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
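# when past key values are used, the decoder receives a single new token, so the sequence axes are named accordingly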
if self.use_past:
a : List[str] = 'past_encoder_sequence + sequence'
a : Tuple = {0: 'batch'}
a : str = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 1_3
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 5e-4
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
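# tokens ending in '@@' are subword pieces that join with the following token (subword-nmt BPE convention)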
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 345 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 357 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> bool:
"""simple docstring"""
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
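# placeholder classes that raise an informative ImportError via requires_backends when flax is not installed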
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["flax"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["flax"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["flax"]
def __init__( self : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["flax"]
def __init__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["flax"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["flax"]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["flax"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["flax"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["flax"]
def __init__( self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["flax"]
def __init__( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["flax"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['flax'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["flax"]
def __init__( self : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['flax'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['flax'])
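# Hedged sketch of how these placeholders behave: when flax is missing,
# instantiating any dummy above raises an ImportError from `requires_backends`
# that points the user at the install command, instead of an opaque failure at
# import time.
try:
    UpperCamelCase()  # the last dummy class defined above (mangled name)
except ImportError as err:
    print(err)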
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
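# Hedged usage sketch (np is already imported at the top of this module):
# calling the processor routes through `BaseImageProcessor.__call__` into
# `preprocess` above, applying resize -> center-crop -> rescale -> normalize.
demo_processor = UpperCamelCase()  # the image processor class defined above
demo_image = np.random.randint(0 , 2_5_6 , size=(4_8_0, 6_4_0, 3) , dtype=np.uint8)
demo_batch = demo_processor(images=demo_image , return_tensors='np')
print(demo_batch['pixel_values'].shape)  # expected (1, 3, 224, 224) with the defaults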
| 345 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Any = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
a : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a : Dict = 128
elif "12-12" in model_name:
a : List[Any] = 12
a : Tuple = 12
elif "14-14" in model_name:
a : Any = 14
a : str = 14
elif "16-16" in model_name:
a : int = 16
a : Any = 16
else:
raise ValueError('Model not supported' )
a : Optional[int] = 'huggingface/label-files'
if "speech-commands" in model_name:
a : int = 35
a : List[str] = 'speech-commands-v2-id2label.json'
else:
a : Optional[int] = 527
a : int = 'audioset-id2label.json'
a : Tuple = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : Union[str, Any] = idalabel
a : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Dict:
if "module.v" in name:
a : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
a : Tuple = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
a : Tuple = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
a : Dict = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
a : List[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Optional[int] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a : Optional[int] = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
a : List[str] = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
a : int = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[Any] ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(snake_case )
if "qkv" in key:
a : Union[str, Any] = key.split('.' )
a : Dict = int(key_split[3] )
a : Optional[int] = config.hidden_size
if "weight" in key:
a : int = val[:dim, :]
a : Tuple = val[dim : dim * 2, :]
a : Optional[int] = val[-dim:, :]
else:
a : Dict = val[:dim]
a : List[Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : Any = val
return orig_state_dict
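# Hedged illustration of the qkv split above: a fused projection of shape
# (3 * dim, dim) is sliced into equal query/key/value blocks along dim 0
# (biases of shape (3 * dim,) are sliced the same way).
demo_dim = 4
demo_qkv = torch.arange(3 * demo_dim * demo_dim , dtype=torch.float32).reshape(3 * demo_dim , demo_dim)
demo_q = demo_qkv[:demo_dim, :]
demo_k = demo_qkv[demo_dim : demo_dim * 2, :]
demo_v = demo_qkv[-demo_dim:, :]
assert demo_q.shape == demo_k.shape == demo_v.shape == (demo_dim, demo_dim)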
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
a : List[str] = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=False ) -> Optional[int]:
a : Optional[int] = get_audio_spectrogram_transformer_config(snake_case )
a : int = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
a : Union[str, Any] = model_name_to_url[model_name]
a : Dict = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' )
# remove some keys
remove_keys(snake_case )
# rename some keys
a : Optional[int] = convert_state_dict(snake_case , snake_case )
# load 🤗 model
a : Dict = ASTForAudioClassification(snake_case )
model.eval()
model.load_state_dict(snake_case )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a : Optional[int] = -4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
a : Dict = 4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
a : str = 1_024 if 'speech-commands' not in model_name else 128
a : Dict = ASTFeatureExtractor(mean=snake_case , std=snake_case , max_length=snake_case )
if "speech-commands" in model_name:
a : List[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' )
a : str = dataset[0]['audio']['array']
else:
a : List[str] = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
a : Union[str, Any] = torchaudio.load(snake_case )
a : Union[str, Any] = waveform.squeeze().numpy()
a : Union[str, Any] = feature_extractor(snake_case , sampling_rate=16_000 , return_tensors='pt' )
# forward pass
a : List[Any] = model(**snake_case )
a : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a : Any = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a : Any = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a : Optional[Any] = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a : Optional[Any] = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a : Union[str, Any] = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a : int = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a : int = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
a : Tuple = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , snake_case , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(snake_case )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase : int = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
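    # Hedged usage sketch (the script filename below is illustrative):
    #
    #     python convert_audio_spectrogram_transformer_checkpoint.py \
    #         --model_name ast-finetuned-audioset-10-10-0.4593 \
    #         --pytorch_dump_folder_path ./ast-converted \
    #         --push_to_hub
    #
    # Only the three flags defined above exist; omit --push_to_hub to keep the
    # converted checkpoint local.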
| 359 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in x) starting from the point `a`."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (the root of log(x) - 1 = 0, since log is the natural log)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
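    # Hedged cross-check of the polynomial example above: the exact smaller root
    # of x**2 - 5*x + 2 is (5 - sqrt(17)) / 2, which the iteration from 0.4
    # converges to well within the default precision.
    exact_root = (5 - sqrt(17)) / 2
    assert abs(newton_raphson("x**2 - 5*x + 2", 0.4) - exact_root) < 1e-8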
| 345 | 0 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
        array = outputs.cpu().detach().numpy()
        # binarize the logits: negative -> 0 (background), positive -> 1 (mask)
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_5_5).astype(np.uint8))
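# Hedged usage sketch (requires downloading the CIDAS/clipseg-rd64-refined
# checkpoint; names below mirror the class above):
#
#     tool = UpperCamelCase()                       # the segmentation tool
#     mask = tool(image=pil_image, label='cat')     # returns a PIL mask image
#
# encode/forward/decode above run in sequence under the tool's __call__.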
| 360 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
        (total_loss,) = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
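# Hedged usage note: these tests run under pytest from a transformers checkout,
# e.g. `RUN_SLOW=1 pytest tests/models/xlm/test_modeling_xlm.py` (path
# illustrative); the @slow generation test above is skipped unless RUN_SLOW=1.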
| 345 | 0 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """
    Compute Gamma(num) for positive integers and half-integers by recursion.

    >>> gamma(5)
    24.0
    """
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    """Sanity checks: Gamma(1/2) = sqrt(pi) and Gamma(1) = Gamma(2) = 1."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input("""Gamma of: """))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
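# Hedged usage note (assuming the standard transformers package layout): with
# `_LazyModule`, the tokenizer submodules above load only on first attribute
# access, e.g.
#
#     from transformers.models.layoutxlm import LayoutXLMProcessor
#
# pays the cost of importing `processing_layoutxlm` at that line rather than
# at `import transformers` time.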
| 345 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : Tuple=3_2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=1_6 , UpperCAmelCase_ : int=[1, 2, 1] , UpperCAmelCase_ : int=[2, 2, 4] , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=2.0 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : int=1e-5 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Any=8 , ):
"""simple docstring"""
a : str = parent
a : Optional[int] = batch_size
a : Dict = image_size
a : List[Any] = patch_size
a : int = num_channels
a : Any = embed_dim
a : Any = depths
a : List[Any] = num_heads
a : Dict = window_size
a : List[str] = mlp_ratio
a : Optional[Any] = qkv_bias
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[Any] = drop_path_rate
a : Optional[Any] = hidden_act
a : Optional[int] = use_absolute_embeddings
a : List[str] = patch_norm
a : Union[str, Any] = layer_norm_eps
a : str = initializer_range
a : Optional[Any] = is_training
a : str = scope
a : int = use_labels
a : str = type_sequence_label_size
a : Dict = encoder_stride
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : str = None
if self.use_labels:
a : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = SwinvaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
a : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
a : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : List[Any] = SwinvaForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : Optional[int] = 1
a : List[str] = SwinvaForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : List[str] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : Optional[Any] = SwinvaForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A : Tuple = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A : Tuple = False
A : Union[str, Any] = False
A : Optional[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = SwinvaModelTester(self)
a : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : str = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Union[str, Any] = True
a : List[Any] = False
a : int = True
a : Tuple = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : List[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Tuple = outputs.attentions
a : Union[str, Any] = len(self.model_tester.depths)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : str = True
a : Optional[int] = config.window_size**2
a : Dict = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Optional[Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
a : Union[str, Any] = len(UpperCAmelCase_)
# Check attention is always last and order is fine
a : Tuple = True
a : Optional[int] = True
a : List[str] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Any = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
if hasattr(self.model_tester , 'num_hidden_states_types'):
a : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
a : Optional[int] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase_))
a : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Tuple = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Union[str, Any] = outputs.hidden_states
a : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# Swinv2 has a different seq_length
a : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
a : Any = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
a : List[Any] = reshaped_hidden_states[0].shape
a : int = (
reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
a : str = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = 3
a : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
a : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
a : int = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Any = SwinvaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : int = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = _config_zero_init(UpperCAmelCase_)
for model_class in self.all_model_classes:
a : int = model_class(config=UpperCAmelCase_)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Any = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
UpperCAmelCase_)
a : Dict = self.default_image_processor
a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : List[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
# verify the logits
a : Any = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
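# A minimal sketch (not part of the test suite above) of the shape arithmetic the
# hidden-state checks rely on: a (B, C, H', W') feature map is flattened to
# (B, H'*W', C) tokens, where H'*W' = (H // patch) * (W // patch). The concrete
# sizes below are illustrative assumptions, not values taken from the tests.
import torch
batch, channels, feat_h, feat_w = 1, 96, 64, 64  # hypothetical stage-0 map for a tiny model
feature_map = torch.zeros(batch, channels, feat_h, feat_w)
tokens = feature_map.view(batch, channels, feat_h * feat_w).permute(0, 2, 1)
assert tokens.shape == (batch, feat_h * feat_w, channels)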
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
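# Usage note (a sketch, not taken from this file): with the `_LazyModule`
# indirection above, importing the package stays cheap; the torch-backed module
# is only loaded when a symbol is first accessed, e.g.
#
#     from transformers import PegasusXForConditionalGeneration  # triggers the lazy load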
| 345 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> bool:
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> list[str]:
"""simple docstring"""
a : List[str] = []
a : Optional[int] = 11
a : List[str] = int('1' + '0' * digit_len )
for num in range(snake_case , snake_case ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case , snake_case ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
a : Optional[Any] = 10
return solutions
def SCREAMING_SNAKE_CASE__ ( snake_case : int = 2 ) -> int:
"""simple docstring"""
a : Optional[int] = 1.0
for fraction in fraction_list(snake_case ):
a : Optional[int] = Fraction(snake_case )
result *= frac.denominator / frac.numerator
return int(snake_case )
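# Worked example (a sanity check added for illustration): the four non-trivial
# two-digit digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their
# product reduces to 1/100, so `solution()` is expected to return 100.
assert fraction_list(2) == ['16/64', '19/95', '26/65', '49/98']
assert solution() == 100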
if __name__ == "__main__":
print(solution()) | 363 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
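# Illustrative check of the property above: with the default
# conv_stride=(5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples by
# math.prod((5, 2, 2, 2, 2, 2, 2)) == 320, i.e. one encoder frame per 320 raw
# audio samples (20 ms at a 16 kHz sampling rate).
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320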
| 345 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : str = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = "lilt"
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : List[Any]=1_2 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : Dict=3_0_7_2 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=5_1_2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : int=1_0_2_4 , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : List[str] = num_attention_heads
a : Tuple = hidden_act
a : List[Any] = intermediate_size
a : Tuple = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : int = max_position_embeddings
a : int = type_vocab_size
a : int = initializer_range
a : Union[str, Any] = layer_norm_eps
a : Union[str, Any] = position_embedding_type
a : Optional[int] = classifier_dropout
a : int = channel_shrink_ratio
a : Dict = max_ad_position_embeddings
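# Note (an interpretation of the field above, not stated in this file): in LiLT
# the layout stream is narrower than the text stream by `channel_shrink_ratio`,
# so the defaults (hidden_size=768, channel_shrink_ratio=4) would give a
# 192-dimensional layout flow.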
| 364 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase : Tuple = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
UpperCamelCase : Tuple = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
UpperCamelCase : Union[str, Any] = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
'references': {
'id': datasets.Value('string'),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string'),
'answer_start': datasets.Value('int32'),
}),
},
}) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
a : List[str] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
a : List[str] = evaluate(dataset=UpperCAmelCase_ , predictions=UpperCAmelCase_)
return score
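# A minimal sketch of the reshaping `_compute` performs above: flat reference
# dicts are wrapped into the official SQuAD evaluation layout
# (dataset -> paragraphs -> qas) before being handed to `evaluate`, e.g.
#   {'id': 'q1', 'answers': {'text': ['1976'], 'answer_start': [97]}}
# becomes
#   [{'paragraphs': [{'qas': [{'id': 'q1', 'answers': [{'text': '1976'}]}]}]}]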
| 365 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
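# Usage sketch (the placeholder name is an illustrative assumption): adding
# '<cat-toy>' with num_vec_per_token=3 registers '<cat-toy>_0', '<cat-toy>_1'
# and '<cat-toy>_2' and maps the placeholder to that list, so
# 'a photo of <cat-toy>' is expanded to
# 'a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2' before normal CLIP encoding.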
| 345 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : str = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "xlm-prophetnet"
A : Optional[int] = ["past_key_values"]
A : Optional[int] = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[Union[str, Callable]] = "gelu" , UpperCAmelCase_ : Optional[int] = 3_0_5_2_2 , UpperCAmelCase_ : Optional[int] = 1_0_2_4 , UpperCAmelCase_ : Optional[int] = 4_0_9_6 , UpperCAmelCase_ : Optional[int] = 1_2 , UpperCAmelCase_ : Optional[int] = 1_6 , UpperCAmelCase_ : Optional[int] = 4_0_9_6 , UpperCAmelCase_ : Optional[int] = 1_2 , UpperCAmelCase_ : Optional[int] = 1_6 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[int] = 5_1_2 , UpperCAmelCase_ : Optional[float] = 0.02 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 2 , UpperCAmelCase_ : Optional[int] = 3_2 , UpperCAmelCase_ : Optional[int] = 1_2_8 , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : Optional[float] = 0.0 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 2 , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : Dict = vocab_size
a : List[Any] = hidden_size
a : Union[str, Any] = encoder_ffn_dim
a : List[str] = num_encoder_layers
a : List[Any] = num_encoder_attention_heads
a : str = decoder_ffn_dim
a : Tuple = num_decoder_layers
a : Tuple = num_decoder_attention_heads
a : Union[str, Any] = max_position_embeddings
a : Any = init_std # Normal(0, this parameter)
a : List[str] = activation_function
# parameters for xlmprophetnet
a : str = ngram
a : Dict = num_buckets
a : Dict = relative_max_distance
a : Dict = disable_ngram_loss
a : Tuple = eps
# 3 Types of Dropout
a : Union[str, Any] = attention_dropout
a : Optional[int] = activation_dropout
a : Dict = dropout
a : Union[str, Any] = use_cache
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , add_cross_attention=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.')
| 366 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
    a : Union[str, Any] = int(name_split[3][len('window') :] )  # "window7" -> 7; taking only the last char would misparse "window12"
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
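# Worked example of the name parsing above for the default checkpoint name:
#   'swin_tiny_patch4_window7_224'.split('_')
#   -> ['swin', 'tiny', 'patch4', 'window7', '224']
# giving model size 'tiny', image size 224 and window size 7, which selects
# embed_dim=96, depths=(2, 2, 6, 2) and num_heads=(3, 6, 12, 24).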
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
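# A minimal sketch of the qkv split above (shapes are the only assumption):
# timm stores the attention projections as one fused matrix of shape
# (3 * dim, dim); the loop slices it into equal query/key/value thirds.
dim = 4  # hypothetical head dimension, for illustration only
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), fused)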
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : UNetaDModel
A : ScoreSdeVeScheduler
def __init__( self : List[str] , UpperCAmelCase_ : UNetaDModel , UpperCAmelCase_ : ScoreSdeVeScheduler):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
@torch.no_grad()
def __call__( self : Any , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 2_0_0_0 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.unet.config.sample_size
a : str = (batch_size, 3, img_size, img_size)
a : List[Any] = self.unet
a : Optional[int] = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_) * self.scheduler.init_noise_sigma
a : Union[str, Any] = sample.to(self.device)
self.scheduler.set_timesteps(UpperCAmelCase_)
self.scheduler.set_sigmas(UpperCAmelCase_)
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
a : str = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device)
# correction step
for _ in range(self.scheduler.config.correct_steps):
a : Tuple = self.unet(UpperCAmelCase_ , UpperCAmelCase_).sample
a : int = self.scheduler.step_correct(UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
# prediction step
a : Dict = model(UpperCAmelCase_ , UpperCAmelCase_).sample
a : Optional[Any] = self.scheduler.step_pred(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_)
            a , a : int = output.prev_sample, output.prev_sample_mean
a : str = sample_mean.clamp(0 , 1)
a : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a : Any = self.numpy_to_pil(UpperCAmelCase_)
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCAmelCase_)
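# Usage sketch (the checkpoint name is an assumption, not taken from this file):
#
#     from diffusers import ScoreSdeVePipeline
#     pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256')
#     image = pipe(num_inference_steps=2000).images[0]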
| 367 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
            # Don't modify the user's data should they want to reuse it (e.g. in tests), because once we
            # modify it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
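# Minimal sketch (standard library only) of the base64-encoded config path the
# constructor above accepts: a JSON config is urlsafe-base64 encoded, passed in
# as a string, then decoded and parsed back into a dict.
import base64
_cfg = {'zero_optimization': {'stage': 2}}
_encoded = base64.urlsafe_b64encode(json.dumps(_cfg).encode('utf-8')).decode('utf-8')
assert json.loads(base64.urlsafe_b64decode(_encoded).decode('utf-8')) == _cfg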
| 345 | 0 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> bool:
"""simple docstring"""
a : Dict = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : float = 1 / 12_345 ) -> int:
"""simple docstring"""
a : Optional[int] = 0
a : Optional[int] = 0
a : int = 3
while True:
a : List[str] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(snake_case ):
a : Any = int(snake_case )
total_partitions += 1
if check_partition_perfect(snake_case ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(snake_case )
integer += 1
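# Derivation behind `check_partition_perfect` above: a partition is perfect
# when k = 4**t - 2**t for some integer t. Writing x = 2**t gives k = x*x - x,
# so x = (1 + sqrt(4*k + 1)) / 2, and the test is whether log2(x) is integral.
# Worked example: k = 2 comes from t = 1 (4**1 = 2**1 + 2):
assert math.log2(math.sqrt(4 * 2 + 1) / 2 + 1 / 2) == 1.0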
if __name__ == "__main__":
print(f'''{solution() = }''')
| 368 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "markuplm"
def __init__( self : int , UpperCAmelCase_ : Dict=3_0_5_2_2 , UpperCAmelCase_ : Optional[Any]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : int=3_0_7_2 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Any=5_1_2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Tuple=1e-12 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Any=2_5_6 , UpperCAmelCase_ : Optional[int]=1_0_2_4 , UpperCAmelCase_ : Optional[Any]=2_1_6 , UpperCAmelCase_ : Union[str, Any]=1_0_0_1 , UpperCAmelCase_ : List[Any]=3_2 , UpperCAmelCase_ : List[Any]=5_0 , UpperCAmelCase_ : str="absolute" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : str = vocab_size
a : Tuple = hidden_size
a : Any = num_hidden_layers
a : int = num_attention_heads
a : Dict = hidden_act
a : Optional[int] = intermediate_size
a : Optional[Any] = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : List[Any] = type_vocab_size
a : str = initializer_range
a : Dict = layer_norm_eps
a : Dict = position_embedding_type
a : Optional[int] = use_cache
a : Dict = classifier_dropout
# additional properties
a : Optional[Any] = max_depth
a : Optional[Any] = max_xpath_tag_unit_embeddings
a : Tuple = max_xpath_subs_unit_embeddings
a : int = tag_pad_id
a : Dict = subs_pad_id
a : List[str] = xpath_unit_hidden_size
| 369 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
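# Usage note (a sketch of the method above; the names are descriptive only):
# given model `outputs` with logits of shape (batch, num_labels, h, w) and a
# list of per-image `target_sizes`, each logit map is bilinearly upsampled to
# its target (height, width) and argmaxed over the class dimension, yielding
# one (H, W) label map per image.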
| 345 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase : List[Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
UpperCamelCase : Any = """\
Mean Squared Error (MSE) is the average of the squared difference between the predicted
and actual values.
"""
UpperCamelCase : List[str] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
    Defines how multiple output values are aggregated. An array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict="uniform_average" , UpperCAmelCase_ : Optional[Any]=True):
"""simple docstring"""
a : Optional[Any] = mean_squared_error(
UpperCAmelCase_ , UpperCAmelCase_ , sample_weight=UpperCAmelCase_ , multioutput=UpperCAmelCase_ , squared=UpperCAmelCase_)
return {"mse": mse}
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
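# Worked example of the series builder above (values follow directly from the
# code): p_series(3, 2) -> ['1', '1 / 4', '1 / 9'], the first terms of
# 1 + 1/2^p + 1/3^p + ... for p = 2.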
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
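# Illustrative, self-contained sketch of the key-prefixing idiom shared by the
# evaluate()/predict() methods above; prefix_metrics is a hypothetical helper name.
def prefix_metrics(metrics: dict, metric_key_prefix: str) -> dict:
    # Mirrors the loop above: prefix every key that is not already prefixed.
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return metrics


print(prefix_metrics({"exact_match": 81.2, "f1": 88.6}, "eval"))
# {'eval_exact_match': 81.2, 'eval_f1': 88.6}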
| 371 | '''simple docstring'''
import torch
def main() -> None:
    """Print how many GPUs the current process can see."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 345 | 0 |
"""Implementation of the sigmoid (logistic) activation function."""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Apply the sigmoid function 1 / (1 + e^-x) elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
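# Quick usage sketch of the function above: large positive inputs approach 1,
# large negative inputs approach 0.
print(sigmoid(np.array([-10.0, 0.0, 10.0])))
# [4.53978687e-05 5.00000000e-01 9.99954602e-01]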
| 350 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 345 | 0 |
"""Falcon configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
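# Illustrative sketch: constructing the config above with its defaults (which match
# Falcon-7B) and reading the two derived properties.
config = FalconConfig()  # vocab_size=65024, hidden_size=4544, num_attention_heads=71
print(config.head_dim)   # 4544 // 71 == 64
print(config.rotary)     # True, because alibi defaults to False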
| 351 | '''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of nums within the index range [left, right] by divide and conquer.

    >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
    ...     find_max(nums, 0, len(nums) - 1) == max(nums)
    True
    True
    True
    True
    >>> find_max([], 0, 0)
    Traceback (most recent call last):
        ...
    ValueError: find_max() arg is an empty sequence
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
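# Worked call against the function above; each recursive step halves the index
# range, so the recursion depth is O(log n).
nums = [1, 5, 3, 9, 7]
print(find_max(nums, 0, len(nums) - 1))  # 9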
| 345 | 0 |
"""Feature extractor class for SegFormer."""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 352 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
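# Illustrative sketch of the core idiom these tests exercise. It is not runnable on
# its own: `model`, `dataloader`, and `accelerator` are assumed to have gone through
# Accelerator.prepare(...) already, as in get_mrpc_setup above. gather_for_metrics
# collects per-process tensors and drops the samples duplicated for even sharding,
# so metrics see each sample exactly once.
all_preds, all_refs = [], []
for batch in dataloader:
    with torch.inference_mode():
        outputs = model(**batch)
    preds = outputs.logits.argmax(dim=-1)
    preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
    all_preds.append(preds)
    all_refs.append(refs)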
| 345 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
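# Illustrative note on the lazy-import pattern above: type checkers follow the real
# imports under TYPE_CHECKING, while at runtime the module object is replaced by a
# _LazyModule that imports a submodule only when one of its attributes is first
# accessed. Sketch of the runtime effect (the import path is an assumption):
#
#     from transformers.models.deprecated import mctct  # nothing heavy imported yet
#     config_cls = mctct.MCTCTConfig  # first attribute access triggers the real import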
| 353 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for audio diffusion.

    Args:
        vqvae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) to encode/decode images to and from latents.
        unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents.
        mel ([`Mel`]): Transforms audio into a spectrogram image and back.
        scheduler ([`DDIMScheduler`] or [`DDPMScheduler`]): Scheduler used to denoise the encoded image latents.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps (50 for DDIM, 1000 for DDPM)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generate a spectrogram image and the corresponding audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising process to recover the noise that generates the given images (DDIM only, as it is deterministic)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
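# Standalone check of the slerp helper above: at alpha=0 it returns the first tensor
# and at alpha=1 the second, up to floating-point error.
x0, x1 = torch.randn(4), torch.randn(4)
print(torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-6))  # True
print(torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 1.0), x1, atol=1e-6))  # True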
| 345 | 0 |
"""DPR model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
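# Illustrative sketch: projection_dim=0 (the default) keeps encoder outputs at
# hidden_size, while a nonzero value adds a projection layer of that width.
config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128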
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problems."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
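# Illustrative sketch of the same classification path outside the test harness.
# Uses the model id from the tests above; downloading weights needs network access,
# and the COCO image URL is the one commonly used in transformers examples.
import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "Egyptian cat"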
| 345 | 0 |
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
def SCREAMING_SNAKE_CASE__ ( *snake_case : str , **snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : List[Any] ) -> str:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : int ) -> List[Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Any , **snake_case : List[str] ) -> str:
"""simple docstring"""
requires_backends(snake_case , ['torch'] )
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Tuple = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(cls , ['torch'])
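# The classes above all follow the "dummy object" pattern used for optional
# backends: every constructor and classmethod immediately calls
# requires_backends(..., ['torch']), so importing the package without torch
# succeeds and an ImportError is only raised when a torch-backed class is
# actually used. Below is a minimal, simplified sketch of that machinery;
# the names are modeled on, but not guaranteed identical to, the real
# transformers utilities, and this version raises unconditionally instead of
# probing whether torch is installed.
def _sketch_requires_backends(obj, backends):
    # Raise a helpful ImportError naming the missing backends.
    name = obj.__name__ if hasattr(obj, '__name__') else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")

class _SketchDummyMetaclass(type):
    # Intercepts instantiation so any use of the class fails fast with a clear message.
    def __call__(cls, *args, **kwargs):
        _sketch_requires_backends(cls, cls._backends)

class _SketchTorchModel(metaclass=_SketchDummyMetaclass):
    _backends = ['torch']
# _SketchTorchModel() -> ImportError: _SketchTorchModel requires the following backends: torch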
| 355 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , outputs : int):
        """simple docstring"""
        a : int = outputs.cpu().detach().numpy()
        a[a <= 0] = 0
        a[a > 0] = 1
        return Image.fromarray((a * 2_5_5).astype(np.uint8))
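# A hedged end-to-end sketch of what the tool above does, written against the
# public CLIPSeg classes directly. The file name 'cat.png' and the prompt
# 'cat' are illustrative placeholders, not from this file, and the exact
# logits shape for a single text/image pair is assumed to be 2D as implied by
# the decode step above.
import numpy as np
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')
seg_model = CLIPSegForImageSegmentation.from_pretrained('CIDAS/clipseg-rd64-refined')
inputs = processor(text=['cat'], images=[Image.open('cat.png')], padding=True, return_tensors='pt')
with torch.no_grad():
    logits = seg_model(**inputs).logits
# Threshold the logits into a binary mask, mirroring the decode step above.
mask = Image.fromarray(((logits.cpu().numpy() > 0) * 2_5_5).astype(np.uint8))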
| 345 | 0 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = """▁"""
UpperCamelCase : List[str] = {"""vocab_file""": """prophetnet.tokenizer"""}
UpperCamelCase : Tuple = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
UpperCamelCase : Optional[int] = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
UpperCamelCase : Optional[int] = {
"""microsoft/xprophetnet-large-wiki100-cased""": 512,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> Dict[str, int]:
    """simple docstring"""
    a : Optional[Any] = collections.OrderedDict()
    with open(snake_case , 'r' , encoding='utf-8' ) as reader:
        tokens : Tuple = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        a[token] = index
    return a
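# A minimal smoke test for the loader above; the temp-file contents are
# illustrative (real ProphetNet vocab files are simply one token per line).
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.vocab', delete=False) as f:
    f.write('[PAD]\n[CLS]\nhello\n')
assert SCREAMING_SNAKE_CASE__(f.name) == {'[PAD]': 0, '[CLS]': 1, 'hello': 2}
os.remove(f.name)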
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = VOCAB_FILES_NAMES
A : List[str] = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : Dict="[SEP]" , UpperCAmelCase_ : List[str]="[UNK]" , UpperCAmelCase_ : int="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Tuple="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece')
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(UpperCAmelCase_))
        self.vocab_file = UpperCAmelCase_
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
        for i in range(1_0):
            a : Union[str, Any] = f"""[unused{i}]"""
            self.fairseq_tokens_to_ids[a] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 1_2
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
def __getstate__( self : str):
"""simple docstring"""
        state : List[Any] = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self : List[str] , d : Optional[Any]):
        """simple docstring"""
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece')
            raise
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens)
        if token_ids_b is None:
            return ([0] * len(token_ids_a)) + [1]
        return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]
    def SCREAMING_SNAKE_CASE_ ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        """simple docstring"""
        a : Union[str, Any] = [self.sep_token_id]
        if token_ids_b is None:
            return len(token_ids_a + a) * [0]
        return len(token_ids_a + a + a + token_ids_b + a) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        vocab : int = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str):
"""simple docstring"""
        return self.sp_model.encode(UpperCAmelCase_ , out_type=str)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Any):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
        spm_id : List[str] = self.sp_model.PieceToId(UpperCAmelCase_)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , index : Union[str, Any]):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Optional[int]):
        """simple docstring"""
        a : Optional[Any] = ''.join(UpperCAmelCase_).replace('▁' , ' ').strip()
        return a
    def SCREAMING_SNAKE_CASE_ ( self : int , save_directory : str , filename_prefix : Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file : Optional[int] = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                a : str = self.sp_model.serialized_model_proto()
                fi.write(a)
        return (out_vocab_file,)
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.sep_token_id]
        a : List[str] = [self.sep_token_id]
        return token_ids_a + a + token_ids_b + a
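# Worked sketch of the fairseq/spm id offset used above: spm ids are shifted by
# fairseq_offset (12), the specials occupy ids 0-4 and [unused0]-[unused9]
# occupy 5-14, so the first "real" spm piece ',' (spm id 3) lands at embedding
# id 15, and spm id 0 (spm's own <unk>) is redirected to the tokenizer's unk id.
def _spm_to_embedding_id(spm_id, fairseq_offset=1_2, unk_token_id=3):
    return spm_id + fairseq_offset if spm_id else unk_token_id
assert _spm_to_embedding_id(3) == 1_5
assert _spm_to_embedding_id(0) == 3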
| 356 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens : Tuple = dict(zip(vocab , range(len(vocab))))
        merges : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        tokenizer : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text : str = 'adapt react readapt apt'
        bpe_tokens : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens : List[Any] = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens : Union[str, Any] = tokens + [tokenizer.unk_token]
        input_bpe_tokens : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
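# A self-contained sketch of the greedy BPE procedure the test above exercises.
# Simplified relative to real BPE: it merges one occurrence of the best-ranked
# pair per step, which is sufficient for this toy merge table. Tokens other
# than the word-final one would carry the '@@' continuation marker in the
# tokenizer's output.
def _toy_bpe(word, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges) if m and not m.startswith('#')}
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            return symbols
        best = min(candidates, key=ranks.get)
        i = pairs.index(best)
        symbols[i:i + 2] = [best[0] + best[1]]

_merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>']
assert _toy_bpe('adapt', _merges) == ['adapt</w>']
assert _toy_bpe('react', _merges) == ['re', 'a', 'c', 't</w>']  # rendered as 're@@ a@@ c@@ t'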
| 345 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : int = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "bridgetower_vision_model"
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=7_6_8 , UpperCAmelCase_ : Union[str, Any]=1_2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : int=2_8_8 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Any=1e-05 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=False , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : Optional[int] = hidden_size
a : Tuple = num_hidden_layers
a : Any = num_channels
a : Tuple = patch_size
a : Optional[int] = image_size
a : Union[str, Any] = initializer_factor
a : Optional[Any] = layer_norm_eps
a : Dict = stop_gradient
a : Dict = share_layernorm
a : Any = remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : int):
"""simple docstring"""
        config_dict , kwargs = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict , **kwargs)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Union[str, Any] = "bridgetower_text_model"
def __init__( self : Any , UpperCAmelCase_ : int=5_0_2_6_5 , UpperCAmelCase_ : List[str]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : Union[str, Any]=1_2 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Dict=3_0_7_2 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=5_1_4 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : List[Any]=1e-05 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]="absolute" , UpperCAmelCase_ : List[Any]=True , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[Any] = vocab_size
a : Dict = hidden_size
a : Optional[Any] = num_hidden_layers
a : str = num_attention_heads
a : Any = hidden_act
a : Optional[int] = initializer_factor
a : Any = intermediate_size
a : int = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[Any] = max_position_embeddings
a : Dict = type_vocab_size
a : str = layer_norm_eps
a : List[str] = position_embedding_type
a : Any = use_cache
a : List[Any] = pad_token_id
a : Optional[int] = bos_token_id
a : List[Any] = eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
        config_dict , kwargs = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict , **kwargs)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "bridgetower"
    def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=7_6_8 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : List[str]=1e-05 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any="add" , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , text_config : Optional[int]=None , vision_config : List[Any]=None , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : Optional[Any] = kwargs.pop('text_config_dict' , UpperCAmelCase_)
a : Optional[int] = kwargs.pop('vision_config_dict' , UpperCAmelCase_)
super().__init__(**UpperCAmelCase_)
a : str = share_cross_modal_transformer_layers
a : List[str] = hidden_act
a : Union[str, Any] = hidden_size
a : Optional[Any] = initializer_factor
a : List[str] = layer_norm_eps
a : str = share_link_tower_layers
a : List[str] = link_tower_type
a : Union[str, Any] = num_attention_heads
a : Any = num_hidden_layers
a : Union[str, Any] = tie_word_embeddings
a : str = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
@classmethod
    def SCREAMING_SNAKE_CASE_ ( cls : Dict , text_config : BridgeTowerTextConfig , vision_config : BridgeTowerVisionConfig , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        output : str = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
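# Hedged usage sketch for the composed config above, written against the real
# transformers classes that the method bodies reference (the anonymized class
# headers in this dump hide those names); the specific values are illustrative.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
text_cfg = BridgeTowerTextConfig(vocab_size=5_0_2_6_5)
vision_cfg = BridgeTowerVisionConfig(image_size=2_8_8)
cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert cfg.to_dict()['vision_config']['image_size'] == 2_8_8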
| 357 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( num_a : int , num_b : int ) -> bool:
    """simple docstring"""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
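# Why the XOR test above works: in two's complement the most significant bit
# is the sign bit, and a ^ b has that bit set exactly when the operands' sign
# bits differ, making the XOR result negative.
assert (-5 ^ 3) < 0         # opposite signs
assert not (4 ^ 7) < 0      # same sign (both positive)
assert not (-4 ^ -7) < 0    # same sign (both negative)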
| 345 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        pixel_values : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels : List[Any] = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config : List[str] = self.get_config()
        return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        a : Optional[int] = self.prepare_config_and_inputs()
        config , pixel_values , labels = a
        inputs_dict : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
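# Worked check of the sequence-length rule used by the model tester above: a
# 30x30 image with 2x2 patches yields (30 // 2) ** 2 = 225 patches, plus one
# [CLS] token, for a sequence length of 226.
image_size, patch_size = 3_0, 2
assert (image_size // patch_size) ** 2 + 1 == 2_2_6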
| 358 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
        size_dict : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size_dict:
            shortest_edge : int = int((2_5_6 / 2_2_4) * size_dict['shortest_edge'])
            output_size : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=shortest_edge , default_to_square=UpperCAmelCase_)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
        size_dict : str = get_size_dict(UpperCAmelCase_)
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size_dict.keys()}""")
        return center_crop(UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
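# Worked example of the shortest-edge rule in resize() above: a requested
# shortest_edge of 224 is first inflated by 256/224, reproducing the classic
# "resize to 256, then center-crop to 224" ImageNet preprocessing recipe.
assert int((2_5_6 / 2_2_4) * 2_2_4) == 2_5_6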
| 345 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        a : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
            a : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        a : Any = GPTaTokenizerFast.from_pretrained('gpt2')
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
            a : Tuple = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        try:
            tmp_file : Optional[int] = tempfile.mktemp()
            with open(tmp_file , 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , f)
            a : List[str] = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
            with open('tokenizer.json' , 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , f)
a : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
A : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any):
"""simple docstring"""
a : str = TOKEN
HfFolder.save_token(UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a : Optional[Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : int = BertTokenizer(UpperCAmelCase_)
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
a : Optional[int] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ , repo_id='test-tokenizer' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
a : Union[str, Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : str = BertTokenizer(UpperCAmelCase_)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
a : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCAmelCase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
a : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : int = CustomTokenizer(UpperCAmelCase_)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
a : Union[str, Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the loaded tokenizer comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : Union[str, Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : Tuple = BertTokenizerFast.from_pretrained(UpperCAmelCase_)
bert_tokenizer.save_pretrained(UpperCAmelCase_)
a : List[str] = CustomTokenizerFast.from_pretrained(UpperCAmelCase_)
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
a : Any = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the loaded tokenizer comes from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
a : Union[str, Any] = AutoTokenizer.from_pretrained(
f"""{USER}/test-dynamic-tokenizer""" , use_fast=UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the loaded tokenizer comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : List[Any] = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[Any] = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : int = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Dict = Trie()
a : Dict = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
self.assertEqual(UpperCAmelCase_ , ['AB', 'C'])
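# The Trie tests above all exercise longest-match splitting on added tokens. Below is
# a minimal hedged sketch of the same API outside the test harness; it assumes `Trie`
# is importable from `transformers.tokenization_utils`, where the class under test lives.
from transformers.tokenization_utils import Trie

demo_trie = Trie()
demo_trie.add('[CLS]')         # register special tokens to split on
demo_trie.add('extra_id_1')
demo_trie.add('extra_id_100')  # the longer token wins over its 'extra_id_1' prefix
print(demo_trie.split('[CLS] This is a extra_id_100'))
# -> ['[CLS]', ' This is a ', 'extra_id_100']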
| 359 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (an expression in x) by Newton-Raphson, starting from a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find the value of e (the root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
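# A hedged alternative sketch: the eval()-based loop above works, but the same Newton
# iteration can avoid eval() entirely by compiling the expression once with
# sympy.lambdify. The helper name newton_raphson_safe is illustrative and not part of
# the original module.
from sympy import diff as sym_diff, lambdify, symbols, sympify


def newton_raphson_safe(expression: str, start: float, precision: float = 10**-10) -> float:
    x_sym = symbols('x')
    expr = sympify(expression)
    f = lambdify(x_sym, expr)                         # callable f(x)
    f_prime = lambdify(x_sym, sym_diff(expr, x_sym))  # callable f'(x)
    x = start
    while abs(f(x)) >= precision:
        x = x - f(x) / f_prime(x)                     # Newton step: x - f(x)/f'(x)
    return float(x)


# Usage, expected to agree with the eval() version above:
# newton_raphson_safe('sin(x)', 2)  # approximately pi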
| 345 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : List[str] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
(total_loss,) = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
(total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): check whether language generation is also applicable to other models
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use the GPU; if no GPU with that id is found, the CPU is used
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
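# Beyond the text-based example in the docstring, the _compute wrapper above forwards
# p_features / q_features straight through to mauve-text, so precomputed embeddings can
# be scored without re-running the GPT-2 featurizer. A hedged sketch; the (n_samples,
# feature_dim) shape is an assumption based on mauve-text's documented API:
import numpy as np
from mauve import compute_mauve

p_features = np.random.randn(100, 1024).astype(np.float32)  # e.g. pooled hidden states
q_features = np.random.randn(100, 1024).astype(np.float32)
out = compute_mauve(p_features=p_features, q_features=q_features, verbose=False)
print(out.mauve)  # scalar in (0, 1]; higher means the two feature sets are closer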
| 361 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively."""
    result = 1  # keeps the running value of C(n, k)
    # Since C(n, k) = C(n, n - k), use the smaller of the two
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Number of binary tree shapes on node_count nodes (the Catalan number)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Compute n! for non-negative n."""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labelled binary trees on node_count nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
UpperCamelCase : Tuple = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
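# A quick sanity check of the formulas above, using the standard small values:
# C(6, 3) = 20, so catalan_number(3) = 20 // 4 = 5 binary tree shapes, and
# binary_tree_count(3) = 5 * 3! = 30 labelled binary trees on 3 nodes.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30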
| 362 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 0 |