import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any model with a causal language modeling head."""

    # Prefix text used to give XLNet and Transformer-XL more context for short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
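

# Usage sketch for the pipeline above (hedged: the checkpoint name and the
# generation kwargs are illustrative, not part of this module):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     out = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(out[0]["generated_text"])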

# ---------------------------------------------------------------------------

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
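        # Worked example with the defaults above: image_size=30 and patch_size=2
        # give (30 // 2) ** 2 = 225 patches, hence seq_length = 225 + 1 = 226.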
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            model(pixel_values)

# ---------------------------------------------------------------------------

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
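

# Usage sketch (hedged: the script filename and checkpoint layout are
# assumptions based on the original DialoGPT release, which ships
# {small,medium,large}_ft.pkl files):
#
#     python convert_dialogpt_checkpoint.py --dialogpt_path /path/to/dialogpt_checkpoints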

# ---------------------------------------------------------------------------

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, covering every byte without mapping to
    whitespace or control characters that would confuse the BPE code.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
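

# Example of the mapping above: byte 0x20 (space) is not in the printable
# ranges, so it maps to chr(256 + 32) = "Ġ", which is why GPT-2/RoBERTa-style
# vocabularies show "Ġ" at the start of space-prefixed tokens.
#     assert bytes_to_unicode()[ord(" ")] == "Ġ"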


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
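

# For instance, get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}: these
# are exactly the candidate merges that the BPE loop below considers.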


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

# ---------------------------------------------------------------------------

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
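

# Sanity checks for the 6k +/- 1 trial division above: every prime greater
# than 3 sits next to a multiple of 6, so only candidates 5, 7, 11, 13, ...
# need to be tested.
#     assert is_prime(29) and is_prime(97)
#     assert not is_prime(1) and not is_prime(91)  # 91 = 7 * 13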


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")

# ---------------------------------------------------------------------------

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
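

# Behavior exercised above, summarized (hedged): a DisjunctiveConstraint is
# fulfilled as soon as any one of its candidate token sequences has been
# generated, e.g. either [1, 2, 3] or [1, 2, 4] satisfies
# DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]]).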

# ---------------------------------------------------------------------------

def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack, initialized to -1 (not yet computed)
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
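

# Worked example for the bottom-up table above (matches the demo at the end of
# this file): with w=6, wt=[4, 3, 2, 3] and val=[3, 2, 4, 4], the recurrence
# fills dp so that dp[4][6] == 8, achieved by items 3 and 4 (weights 2 + 3 <= 6,
# values 4 + 4 = 8).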


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)

# ---------------------------------------------------------------------------

import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
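

# Note on the `fairseq_offset` arithmetic in test_full_tokenizer (hedged): the
# XGLM tokenizer follows fairseq's convention of reserving the first vocabulary
# ids for special tokens, so raw SentencePiece ids are shifted by a fixed
# offset before being compared against expected ids.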

# ---------------------------------------------------------------------------

from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
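

# Minimal usage sketch (hedged: values shown are just the defaults above, and
# the aliasing relies on PretrainedConfig routing names through `attribute_map`):
#
#     config = ErnieMConfig(vocab_size=250002, hidden_size=768)
#     config.num_classes = 5  # stored as `num_labels` via `attribute_map`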

# ---------------------------------------------------------------------------

def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
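

# The function above evaluates the Friedmann equation for the Hubble parameter
# at redshift z:
#     H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)
# where the curvature density is fixed by Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).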


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )

# ---------------------------------------------------------------------------

import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_: str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_: Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
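
# For illustration, the first generated pair for layer 0 maps the timm key
# "blocks.0.norm1.weight" to the HF key "vit.encoder.layer.0.layernorm_before.weight"
# (or "encoder.layer.0.layernorm_before.weight" once the "vit" prefix is stripped
# for base models).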
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
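
# Sanity check for the split above: timm stores Q, K and V as one fused projection of
# shape (3 * hidden_size, hidden_size); slicing rows [0:h], [h:2h] and [-h:] recovers
# the three (hidden_size, hidden_size) matrices in query/key/value order.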
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # image of two cats from the COCO val2017 set, used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
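
# Example invocation (assuming this script is saved as convert_vit_timm_to_pytorch.py;
# the dump path is an illustrative placeholder):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit_base_patch16_224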
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
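
# Each entry pairs a TF-slim variable name with the PyTorch parameter tensor it fills.
# For i = 0 above (tf_index 1, pt_index 0), the depthwise entry is, e.g.:
#   "MobilenetV1/Conv2d_1_depthwise/depthwise_weights" -> backbone.layer[0].convolution.weight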
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # drop the weight itself plus any optimizer slot variables that shadow it
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
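
# Example of the "SAME" rule above (values derived from the formulas, for illustration):
# a 7x7 input with a 3x3 kernel and stride 2 gives pad_along = max(3 - (7 % 2), 0) = 2,
# split as (top, bottom) = (1, 1), so the convolution output size is ceil(7 / 2) = 4.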
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # with TF-style "SAME" padding the convolution itself is left unpadded
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # each block is a depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # infer the problem type from the label dtype and number of labels
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
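
# Minimal inference sketch (assumes access to the "google/mobilenet_v1_1.0_224"
# checkpoint and a PIL `image`; this follows the standard transformers flow rather
# than anything specific to this file):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])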
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
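
    # Minimal usage sketch outside the test harness (mirrors what the tests above
    # exercise; `raw_audio` is a placeholder 1-D float waveform sampled at 16 kHz):
    #
    #   extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24)
    #   features = extractor(raw_audio, sampling_rate=16_000, return_tensors="np").input_features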
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a snake_case string to camelCase (or PascalCase if `use_pascal` is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    # in camelCase the first word keeps its casing; in PascalCase every word is capitalized
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
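
# Example invocation (assuming this script is saved as convert_bit_to_pytorch.py; the
# dump path is an illustrative placeholder):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./resnetv2_50x1_bitm --push_to_hub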
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
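
# Note: the helper above deliberately checkpoints the scheduler halfway through and
# restores it from `state_dict`, so its output only matches `unwrap_schedule` when the
# schedule serializes losslessly -- which is exactly what the tests below assert.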
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # plain AdamW steps pull w toward the target
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps an lr lambda in a class instance so the schedule remains picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib, so these mixin tests are skipped
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # bypass the frozen dataclass to swap in the aligned schema
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
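
# Minimal usage sketch (the dataset id is a placeholder; `prepare_for_task` is the
# standard `datasets` entry point that consumes task templates like this one):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("some/asr-dataset", split="train")
#   ds = ds.prepare_for_task("automatic-speech-recognition")
#   # each example now has exactly the "audio" and "transcription" columns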
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved pseudo numerical methods for diffusion models (iPNDM) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) combination of the stored derivatives
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
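
# Minimal sampling-loop sketch (`unet` is a hypothetical noise-prediction callable;
# the loop shape is the standard diffusers scheduler contract):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample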
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
        # create the distribution and take its log (the inverse of the softmax applied inside TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
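# A minimal NumPy sketch of the top-k warping idea exercised above (illustrative
# only; the real implementation is transformers' FlaxTopKLogitsWarper). Ties at
# the threshold all survive in this simple version:
import numpy as np
def top_k_mask(logits, k, filter_value=-np.inf):
    # Keep the k largest logits per row; replace the rest with filter_value.
    kth_largest = np.sort(logits, axis=-1)[:, -k][:, None]
    return np.where(logits < kth_largest, filter_value, logits)
print(top_k_mask(np.array([[0.1, 0.4, 0.2, 0.3]]), k=2))  # [[-inf 0.4 -inf 0.3]]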
| 13 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = TransfoXLTokenizer
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self : str):
super().setUp()
SCREAMING_SNAKE_CASE_: List[Any] = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
SCREAMING_SNAKE_CASE_: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: str = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Any = "<unk> UNwanted , running"
SCREAMING_SNAKE_CASE_: Tuple = "<unk> unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("<unk> UNwanted , running")
self.assertListEqual(lowerCAmelCase__ , ["<unk>", "unwanted", ",", "running"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [0, 4, 8, 7])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: str = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? ") , ["hello", "!", "how", "are", "you", "?"])
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: int = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
SCREAMING_SNAKE_CASE_: List[str] = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = len(lowerCAmelCase__)
tokenizer.add_tokens(["new1", "new2"])
tokenizer.move_added_token("new1" , 1)
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCAmelCase__) , original_len + 2)
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1") , [1])
self.assertEqual(tokenizer.decode([1]) , "new1")
| 13 |
import math
import sys
def A_ ( _UpperCAmelCase ):
if number != int(_UpperCAmelCase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
SCREAMING_SNAKE_CASE_: List[str] = [-1] * (number + 1)
SCREAMING_SNAKE_CASE_: str = 0
for i in range(1 , number + 1 ):
SCREAMING_SNAKE_CASE_: str = sys.maxsize
SCREAMING_SNAKE_CASE_: List[Any] = int(math.sqrt(_UpperCAmelCase ) )
for j in range(1 , root + 1 ):
SCREAMING_SNAKE_CASE_: List[str] = 1 + answers[i - (j**2)]
SCREAMING_SNAKE_CASE_: Optional[Any] = min(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
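# A de-obfuscated sketch of the same dynamic programme (hypothetical names), with
# a few worked values; by Lagrange's four-square theorem the answer never exceeds 4:
def min_squares(n: int) -> int:
    answers = [0] * (n + 1)
    for i in range(1, n + 1):
        answers[i] = min(1 + answers[i - j * j] for j in range(1, int(math.isqrt(i)) + 1))
    return answers[n]
assert min_squares(12) == 3  # 4 + 4 + 4
assert min_squares(13) == 2  # 4 + 9
assert min_squares(25) == 1  # 5**2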
| 13 | 1 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = generate_pascal_triangle(_UpperCAmelCase )
for row_idx in range(_UpperCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def A_ ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
SCREAMING_SNAKE_CASE_: list[list[int]] = []
for current_row_idx in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = populate_current_row(_UpperCAmelCase , _UpperCAmelCase )
triangle.append(_UpperCAmelCase )
return triangle
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = 1, 1
for current_col_idx in range(1 , _UpperCAmelCase ):
calculate_current_element(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return current_row
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_: str = triangle[current_row_idx - 1][current_col_idx - 1]
SCREAMING_SNAKE_CASE_: Optional[int] = triangle[current_row_idx - 1][current_col_idx]
SCREAMING_SNAKE_CASE_: str = above_to_left_elt + above_to_right_elt
def A_ ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
SCREAMING_SNAKE_CASE_: list[list[int]] = [[1]]
for row_index in range(1 , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = [0] + result[-1] + [0]
SCREAMING_SNAKE_CASE_: Tuple = row_index + 1
# Calculate the number of distinct elements in a row
SCREAMING_SNAKE_CASE_: Any = sum(divmod(_UpperCAmelCase , 2 ) )
SCREAMING_SNAKE_CASE_: Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
SCREAMING_SNAKE_CASE_: Tuple = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
SCREAMING_SNAKE_CASE_: List[str] = row_first_half + row_second_half
result.append(_UpperCAmelCase )
return result
def A_ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None:
SCREAMING_SNAKE_CASE_: int = f"{func.__name__}({value})"
SCREAMING_SNAKE_CASE_: List[str] = timeit(f"__main__.{call}" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
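# For reference, the intended output of generate_pascal_triangle(5) (and of its
# optimized variant) is:
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]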
| 13 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
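# The assignment above defers the heavy torch imports until an attribute is
# actually accessed. A toy stand-in showing the mechanism (illustrative only,
# not the transformers _LazyModule class):
import importlib
import types
class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # Import the owning submodule lazily, on first attribute access.
        for module, names in self._import_structure.items():
            if attr in names:
                submodule = importlib.import_module(f"{self.__name__}.{module}")
                return getattr(submodule, attr)
        raise AttributeError(attr)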
| 13 | 1 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = [0] * len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = []
SCREAMING_SNAKE_CASE_: str = []
SCREAMING_SNAKE_CASE_: List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCAmelCase )
while queue:
SCREAMING_SNAKE_CASE_: Optional[int] = queue.pop(0 )
cnt += 1
topo.append(_UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_UpperCAmelCase )
if cnt != len(_UpperCAmelCase ):
print("Cycle exists" )
else:
print(_UpperCAmelCase )
# Adjacency List of Graph
lowerCAmelCase : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
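# For the adjacency list above, the intended Kahn's-algorithm output is:
# [0, 1, 2, 3, 4, 5]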
| 13 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__)
return generator, ["Something to write", "Something else"]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there")
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ANY(lowerCAmelCase__)}])
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
SCREAMING_SNAKE_CASE_: List[Any] = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
SCREAMING_SNAKE_CASE_: Dict = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
with self.assertRaises(lowerCAmelCase__):
generator(4)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: Union[str, Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
SCREAMING_SNAKE_CASE_: Union[str, Any] = 3
SCREAMING_SNAKE_CASE_: Any = generator(
"Something there" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Any = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = generator("This is a test" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE_: str = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_: Union[str, Any] = "<pad>"
SCREAMING_SNAKE_CASE_: Tuple = generator(
["This is a test", "This is a second test"] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
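# Minimal stand-alone usage of the pipeline exercised above (model name taken
# from the test itself; kept commented out since it downloads weights):
# from transformers import pipeline
# generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
# print(generator("Something there", do_sample=False))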
| 13 | 1 |
# Algorithm for the pigeonhole sorting
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = min(_UpperCAmelCase ) # min() finds the minimum value
SCREAMING_SNAKE_CASE_: Tuple = max(_UpperCAmelCase ) # max() finds the maximum value
SCREAMING_SNAKE_CASE_: str = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
SCREAMING_SNAKE_CASE_: List[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "integers only please"
holes[x - min_val] += 1
    # Put the elements back into the array in sorted order.
SCREAMING_SNAKE_CASE_: int = 0
for count in range(_UpperCAmelCase ):
while holes[count] > 0:
holes[count] -= 1
SCREAMING_SNAKE_CASE_: str = count + min_val
i += 1
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[str] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_UpperCAmelCase )
print("Sorted order is:" , " ".join(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
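# With the input [8, 3, 2, 7, 4, 6, 8], main() prints:
# Sorted order is: 2 3 4 6 7 8 8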
| 13 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = [0] * len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = []
SCREAMING_SNAKE_CASE_: str = []
SCREAMING_SNAKE_CASE_: List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCAmelCase )
while queue:
SCREAMING_SNAKE_CASE_: Optional[int] = queue.pop(0 )
cnt += 1
topo.append(_UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_UpperCAmelCase )
if cnt != len(_UpperCAmelCase ):
print("Cycle exists" )
else:
print(_UpperCAmelCase )
# Adjacency List of Graph
lowerCAmelCase : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 13 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = DetaConfig(
backbone_config=_UpperCAmelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_UpperCAmelCase , with_box_refine=_UpperCAmelCase , two_stage=_UpperCAmelCase , )
# set labels
SCREAMING_SNAKE_CASE_: Dict = "huggingface/label-files"
if "o365" in model_name:
SCREAMING_SNAKE_CASE_: Dict = 3_66
SCREAMING_SNAKE_CASE_: str = "object365-id2label.json"
else:
SCREAMING_SNAKE_CASE_: Tuple = 91
SCREAMING_SNAKE_CASE_: List[str] = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE_: Tuple = num_labels
SCREAMING_SNAKE_CASE_: Optional[Any] = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE_: int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Any = idalabel
SCREAMING_SNAKE_CASE_: Any = {v: k for k, v in idalabel.items()}
return config
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = dct.pop(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Any = val
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_: Optional[int] = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE_: List[Any] = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_: List[str] = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE_: List[str] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE_: List[Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE_: List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE_: Optional[int] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE_: Optional[int] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
# transformer decoder self-attention layers
SCREAMING_SNAKE_CASE_: int = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE_: Optional[Any] = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE_: Union[str, Any] = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_: int = in_proj_weight[:hidden_size, :]
SCREAMING_SNAKE_CASE_: Tuple = in_proj_bias[:hidden_size]
SCREAMING_SNAKE_CASE_: List[Any] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_: Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE_: List[str] = in_proj_weight[-hidden_size:, :]
SCREAMING_SNAKE_CASE_: int = in_proj_bias[-hidden_size:]
def A_ ( ):
SCREAMING_SNAKE_CASE_: Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_: List[str] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = get_deta_config(_UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE_: Optional[Any] = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE_: Tuple = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f"Model name {model_name} not supported" )
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.load(_UpperCAmelCase , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(_UpperCAmelCase , param.shape )
# rename keys
SCREAMING_SNAKE_CASE_: str = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
SCREAMING_SNAKE_CASE_: Any = state_dict.pop(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = val
if "input_proj" in key:
SCREAMING_SNAKE_CASE_: Union[str, Any] = state_dict.pop(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
SCREAMING_SNAKE_CASE_: str = state_dict.pop(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_: Optional[int] = DetaForObjectDetection(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_UpperCAmelCase )
# load image processor
SCREAMING_SNAKE_CASE_: str = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Any = processor(images=_UpperCAmelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE_: str = encoding["pixel_values"]
SCREAMING_SNAKE_CASE_: Optional[int] = model(pixel_values.to(_UpperCAmelCase ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE_: int = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE_: int = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_UpperCAmelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_UpperCAmelCase ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f"jozhang97/{model_name}" )
processor.push_to_hub(f"jozhang97/{model_name}" )
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : Any = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
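# Example invocation (assuming this script is saved as
# convert_deta_swin_to_pytorch.py; the flags match the argparse setup above):
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large --push_to_hub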
| 13 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : List[str] = 32
def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_: List[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Tuple = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels', which is the name expected
    # by the models in the transformers library
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: List[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE_: List[str] = None
return tokenizer.pad(
_UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Dict = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == "fp8") , )
return train_dataloader, eval_dataloader
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
# Initialize accelerator
SCREAMING_SNAKE_CASE_: str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: int = config["lr"]
SCREAMING_SNAKE_CASE_: Any = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_: Optional[int] = int(config["seed"] )
SCREAMING_SNAKE_CASE_: List[Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_: List[str] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_: Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_: Tuple = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_: Dict = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_: Tuple = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Optional[int] = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_: Optional[int] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_: Tuple = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] = outputs.loss
SCREAMING_SNAKE_CASE_: Tuple = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE_: List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase )
def A_ ( ):
SCREAMING_SNAKE_CASE_: Any = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE_: Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_: Optional[int] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
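# Example launches (script name is hypothetical; `accelerate launch` handles the
# distributed placement described in the header comment):
# python nlp_example.py --mixed_precision fp16
# accelerate launch nlp_example.py --mixed_precision bf16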
| 13 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : List[str] = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13 |
from collections.abc import Callable
class __lowercase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Callable | None = None):
# Stores actual heap items.
SCREAMING_SNAKE_CASE_: list = []
# Stores indexes of each item for supporting updates and deletion.
SCREAMING_SNAKE_CASE_: dict = {}
# Stores current size of heap.
SCREAMING_SNAKE_CASE_: Optional[Any] = 0
        # Stores the function used to score an item; heap ordering is based on
        # this score.
        SCREAMING_SNAKE_CASE_: Any = key or (lambda x: x)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : int):
return int((i - 1) / 2) if i > 0 else None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(2 * i + 1)
return left if 0 < left < self.size else None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(2 * i + 2)
return right if 0 < right < self.size else None
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.arr[j], self.arr[i]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
return self.arr[i][1] < self.arr[j][1]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Any = self._left(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = self._right(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = i
if left is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Optional[int] = left
if right is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Tuple = right
return valid_parent
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: List[Any] = self._parent(lowerCAmelCase__)
while parent is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__):
self._swap(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = parent, self._parent(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[int] = self._get_valid_parent(lowerCAmelCase__)
while valid_parent != index:
self._swap(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = valid_parent, self._get_valid_parent(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_: Any = self.pos_map[item]
SCREAMING_SNAKE_CASE_: int = [item, self.key(lowerCAmelCase__)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowerCAmelCase__)
self._heapify_down(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : int):
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_: Optional[Any] = self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE_: List[str] = self.arr[self.size - 1]
SCREAMING_SNAKE_CASE_: Tuple = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowerCAmelCase__)
self._heapify_down(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[int] = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(lowerCAmelCase__)])
else:
SCREAMING_SNAKE_CASE_: str = [item, self.key(lowerCAmelCase__)]
SCREAMING_SNAKE_CASE_: List[Any] = self.size
self.size += 1
self._heapify_up(self.size - 1)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return self.arr[0] if self.size else None
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Dict = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
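# ---------------------------------------------------------------------------
# Editor-added hedged sketch (not part of the original file): the class above
# keeps arr as [item, score] pairs plus pos_map (item -> index), which is what
# makes its update/delete operations O(log n). A plain `heapq` with lazy
# deletion reproduces the same observable behaviour and is a useful reference
# point; names below are illustrative only.
import heapq

def _lazy_deletion_demo():
    heap, removed = [], set()
    for item, score in [("a", 3), ("b", 1), ("c", 2)]:
        heapq.heappush(heap, (score, item))
    removed.add("b")  # analogue of deleting item "b" from the heap above
    out = []
    while heap:
        score, item = heapq.heappop(heap)
        if item not in removed:
            out.append((item, score))
    return out  # [("c", 2), ("a", 3)]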
| 13 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : Optional[Any] = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
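# ---------------------------------------------------------------------------
# Editor-added hedged sketch (an assumption, not the real `transformers`
# implementation): the `_LazyModule` used above maps each exported name to its
# defining submodule and only imports that submodule on first attribute
# access, so the top-level `import` stays cheap.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the defining submodule lazily, then resolve the attribute.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)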
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Dict = TaTokenizerFast
lowerCAmelCase : Optional[int] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCAmelCase : Union[str, Any] = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class __lowercase ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : str = " "):
SCREAMING_SNAKE_CASE_: Any = sentence_delimiter
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str):
return list(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = []
for sent_idx, sentence in enumerate(lowerCAmelCase__):
chars.extend(self.process_string(lowerCAmelCase__))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__) - 1:
chars.append(self.sentence_delimiter)
return chars
lowerCAmelCase : Dict = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCAmelCase : Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCAmelCase : Any = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCAmelCase : List[str] = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCAmelCase : Any = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Dict):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str=False):
if concatenate_texts:
return jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )["wer"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
SCREAMING_SNAKE_CASE_: Any = 0
for prediction, reference in zip(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: List[Any] = jiwer.compute_measures(
lowerCAmelCase__ , lowerCAmelCase__ , truth_transform=lowerCAmelCase__ , hypothesis_transform=lowerCAmelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
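# ---------------------------------------------------------------------------
# Editor-added worked example of the CER formula quoted in the docstring,
# CER = (S + D + I) / (S + D + C), using a tiny dynamic-programming edit
# distance instead of jiwer. This is a sketch for illustration only.
def _char_error_rate(reference: str, hypothesis: str) -> float:
    # Levenshtein distance over characters equals S + D + I.
    m, n = len(reference), len(hypothesis)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == hypothesis[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[m][n] / max(m, 1)

assert abs(_char_error_rate("abc", "abd") - 1 / 3) < 1e-9  # one substitution over three chars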
| 13 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] = ShapEPipeline
_UpperCAmelCase : Tuple = ['''prompt''']
_UpperCAmelCase : Dict = ['''prompt''']
_UpperCAmelCase : Any = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Optional[int] = False
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 8
@property
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE_: Any = PriorTransformer(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_: Optional[int] = ShapERenderer(**lowerCAmelCase__)
return model
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] = self.dummy_renderer
SCREAMING_SNAKE_CASE_: Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_: Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = "cpu"
SCREAMING_SNAKE_CASE_: Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Dict = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE_: Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
# NOTE: Larger batch sizes cause this test to time out, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Dict = torch_device == "cpu"
SCREAMING_SNAKE_CASE_: List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 1
SCREAMING_SNAKE_CASE_: Any = 2
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_inputs(lowerCAmelCase__)
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_: List[Any] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_: Tuple = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
SCREAMING_SNAKE_CASE_: List[str] = ShapEPipeline.from_pretrained("openai/shap-e")
SCREAMING_SNAKE_CASE_: Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
SCREAMING_SNAKE_CASE_: int = pipe(
"a shark" , generator=lowerCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
| 13 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = AlbertTokenizer
_UpperCAmelCase : Tuple = AlbertTokenizerFast
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[str] = AlbertTokenizer(lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Tuple = "this is a test"
SCREAMING_SNAKE_CASE_: Optional[int] = "this is a test"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "<pad>"
SCREAMING_SNAKE_CASE_: Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<pad>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "▁eloquent")
self.assertEqual(len(lowerCAmelCase__) , 3_0000)
def _SCREAMING_SNAKE_CASE ( self : int):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: str = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Optional[Any] = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = AlbertTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁this", "▁is", "▁a", "▁test"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [48, 25, 21, 1289])
SCREAMING_SNAKE_CASE_: int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[str] = AlbertTokenizer(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode("sequence builders")
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode("multi-sequence build")
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
# fmt: off
SCREAMING_SNAKE_CASE_: Any = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
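# ---------------------------------------------------------------------------
# Editor-added illustration of the special-token layout asserted in the
# sequence-builder test above (a standalone sketch, not the tokenizer API):
# single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
def _build_inputs_sketch(cls_id, sep_id, ids_a, ids_b=None):
    return [cls_id] + ids_a + [sep_id] + ([] if ids_b is None else ids_b + [sep_id])

assert _build_inputs_sketch(2, 3, [7, 8]) == [2, 7, 8, 3]
assert _build_inputs_sketch(2, 3, [7], [9]) == [2, 7, 3, 9, 3]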
| 13 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
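# ---------------------------------------------------------------------------
# Editor-added sketch of the fallback pattern above (names hypothetical): the
# dummy objects imported on failure carry the same names as the real classes,
# so `from diffusers import UnCLIPPipeline` still resolves and only raises a
# clear error when the class is actually instantiated.
class _DummyPipelineSketch:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "torch and transformers>=4.25.0 are required to use this pipeline"
        )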
| 13 | 1 |
def A_ ( _UpperCAmelCase = 1_00_00_00 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = set(range(3 , _UpperCAmelCase , 2 ) )
primes.add(2 )
for p in range(3 , _UpperCAmelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _UpperCAmelCase , _UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_: Optional[Any] = [float(_UpperCAmelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_UpperCAmelCase , limit + 1 , _UpperCAmelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
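# ---------------------------------------------------------------------------
# Editor-added sanity sketch: a brute-force Euler phi via gcd should agree
# with the sieve above on a small bound (sum of phi(n) for 2 <= n <= 8 is 21).
from math import gcd

def _phi_bruteforce(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

assert sum(_phi_bruteforce(n) for n in range(2, 9)) == 21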
| 13 |
class __lowercase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: List[str] = name
SCREAMING_SNAKE_CASE_: Union[str, Any] = val
def __str__( self : Dict):
return F"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self : List[str] , lowerCAmelCase__ : Any):
return self.val < other.val
class __lowercase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: int = {}
SCREAMING_SNAKE_CASE_: Any = self.build_heap(lowerCAmelCase__)
def __getitem__( self : List[Any] , lowerCAmelCase__ : Dict):
return self.get_value(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Dict):
return (idx - 1) // 2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]):
return idx * 2 + 1
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return idx * 2 + 2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
return self.heap_dict[key]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__) - 1
SCREAMING_SNAKE_CASE_: List[str] = self.get_parent_idx(lowerCAmelCase__)
for idx, i in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Union[str, Any] = idx
SCREAMING_SNAKE_CASE_: str = i.val
for i in range(lowerCAmelCase__ , -1 , -1):
self.sift_down(lowerCAmelCase__ , lowerCAmelCase__)
return array
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str]):
while True:
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_left_child_idx(lowerCAmelCase__) # noqa: E741
SCREAMING_SNAKE_CASE_: Dict = self.get_right_child_idx(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = idx
if l < len(lowerCAmelCase__) and array[l] < array[idx]:
SCREAMING_SNAKE_CASE_: List[str] = l
if r < len(lowerCAmelCase__) and array[r] < array[smallest]:
SCREAMING_SNAKE_CASE_: str = r
if smallest != idx:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = array[smallest], array[idx]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
SCREAMING_SNAKE_CASE_: Optional[int] = smallest
else:
break
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Any = self.get_parent_idx(lowerCAmelCase__)
while p >= 0 and self.heap[p] > self.heap[idx]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = self.heap[idx], self.heap[p]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
SCREAMING_SNAKE_CASE_: Union[str, Any] = p
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_parent_idx(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return self.heap[0]
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.heap[-1], self.heap[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
SCREAMING_SNAKE_CASE_: int = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap)
return x
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
self.heap.append(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = len(self.heap) - 1
SCREAMING_SNAKE_CASE_: List[str] = node.val
self.sift_up(len(self.heap) - 1)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return len(self.heap) == 0
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less than the current value"
SCREAMING_SNAKE_CASE_: Any = new_value
SCREAMING_SNAKE_CASE_: Tuple = new_value
self.sift_up(self.idx_of_element[node])
lowerCAmelCase : int = Node("""R""", -1)
lowerCAmelCase : str = Node("""B""", 6)
lowerCAmelCase : str = Node("""A""", 3)
lowerCAmelCase : List[str] = Node("""X""", 1)
lowerCAmelCase : Union[str, Any] = Node("""E""", 4)
# Use one of these two ways to generate a Min-Heap
# Generating Min-Heap from array
lowerCAmelCase : Optional[Any] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
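# Editor-added sanity check (relies on the demo objects above): after
# decrease_key, the heap root should hold the new minimum, B(-17).
assert my_min_heap.heap[0].val == -17
print("""Root after decrease key:""", my_min_heap.heap[0])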
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
lowerCAmelCase : Optional[Any] = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple = set()
SCREAMING_SNAKE_CASE_: Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: List[Any] = char
SCREAMING_SNAKE_CASE_: Dict = set(_UpperCAmelCase )
return pairs
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : Union[str, Any]="</s>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : List[str]="<unk>" , lowerCAmelCase__ : str="<pad>" , lowerCAmelCase__ : List[Any]="<mask>" , **lowerCAmelCase__ : List[Any] , ):
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE_: Optional[int] = merges_file
SCREAMING_SNAKE_CASE_: List[str] = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[int] = 1
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 3
self.add_from_file(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: int = merges_handle.read().split("\n")[:-1]
SCREAMING_SNAKE_CASE_: int = [tuple(merge.split()[:-1]) for merge in merges]
SCREAMING_SNAKE_CASE_: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: Dict = {}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: List[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Any):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tuple(list(word[:-1]) + [word[-1] + "</w>"])
SCREAMING_SNAKE_CASE_: Tuple = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = bigram
SCREAMING_SNAKE_CASE_: Dict = []
SCREAMING_SNAKE_CASE_: Tuple = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: List[Any] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = "@@ ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word[:-4]
SCREAMING_SNAKE_CASE_: int = word
return word
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Dict = []
SCREAMING_SNAKE_CASE_: Optional[int] = re.findall(R"\S+\n?" , lowerCAmelCase__)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase__).split(" ")))
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Optional[Any]):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
return self.decoder.get(lowerCAmelCase__ , self.unk_token)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: int = " ".join(lowerCAmelCase__).replace("@@ " , "").strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file , lowerCAmelCase__)
if os.path.abspath(self.merges_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.merges_file , lowerCAmelCase__)
return out_vocab_file, out_merge_file
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Union[str, Any]):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
try:
with open(lowerCAmelCase__ , "r" , encoding="utf-8") as fd:
self.add_from_file(lowerCAmelCase__)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset")
return
SCREAMING_SNAKE_CASE_: Any = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE_: str = lineTmp.strip()
SCREAMING_SNAKE_CASE_: Any = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
SCREAMING_SNAKE_CASE_: Optional[int] = line[:idx]
SCREAMING_SNAKE_CASE_: int = len(self.encoder)
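# ---------------------------------------------------------------------------
# Editor-added hedged sketch of the merge loop implemented in the BPE method
# above: repeatedly merge the highest-ranked adjacent pair until no ranked
# pair remains. The ranks below are illustrative, not PhoBERT's real merges.
def _bpe_sketch(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        scored = [(ranks.get(pair, float("inf")), i)
                  for i, pair in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(scored)
        if rank == float("inf"):
            break  # no mergeable pair left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

assert _bpe_sketch("hello", {("h", "e"): 0, ("l", "l"): 1}) == ["he", "ll", "o"]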
| 13 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase : Any = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
if rng is None:
SCREAMING_SNAKE_CASE_: List[Any] = random.Random()
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for _ in range(_UpperCAmelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
SCREAMING_SNAKE_CASE_: List[Any] = np.array(_UpperCAmelCase , dtype=jnp.intaa ).reshape(_UpperCAmelCase )
return output
def A_ ( _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor(_UpperCAmelCase , vocab_size=2 , rng=_UpperCAmelCase )
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
return attn_mask
@require_flax
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = ()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[int] = inputs["input_ids"].shape[-1] // 2
SCREAMING_SNAKE_CASE_: List[str] = inputs["input_ids"][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE_: Any = jnp.ones_like(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE_: Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE_: Optional[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: Dict = max_length
SCREAMING_SNAKE_CASE_: List[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: str = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , flax_model.params)
SCREAMING_SNAKE_CASE_: List[Any] = flax_model.generate(lowerCAmelCase__).sequences
SCREAMING_SNAKE_CASE_: str = pt_model.generate(torch.tensor(lowerCAmelCase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_: List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Dict = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
SCREAMING_SNAKE_CASE_: Optional[int] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: int = max_length
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = max_length
SCREAMING_SNAKE_CASE_: Any = 0.8
SCREAMING_SNAKE_CASE_: Any = 10
SCREAMING_SNAKE_CASE_: List[str] = 0.3
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: int = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: List[str] = 2
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Tuple = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: List[Any] = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[int] = True
SCREAMING_SNAKE_CASE_: Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
SCREAMING_SNAKE_CASE_: Any = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
SCREAMING_SNAKE_CASE_: List[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
SCREAMING_SNAKE_CASE_: Optional[int] = "Hello world"
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="np").input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase__ , "do_samples"):
model.generate(lowerCAmelCase__ , do_samples=lowerCAmelCase__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase__ , "foo"):
SCREAMING_SNAKE_CASE_: str = {"foo": "bar"}
model.generate(lowerCAmelCase__ , **lowerCAmelCase__)
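# ---------------------------------------------------------------------------
# Editor-added minimal analogue of the jit-consistency pattern used by the
# tests above: outputs under `jax.jit` tracing must match eager execution.
import jax

def _doubled_cumsum(x):
    return jnp.cumsum(x) * 2

_x = jnp.arange(4)
assert jax.jit(_doubled_cumsum)(_x).tolist() == _doubled_cumsum(_x).tolist()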
| 13 | 1 |
import pprint
import requests
lowerCAmelCase : Dict = """https://zenquotes.io/api"""
def A_ ( ):
# Fetches the quote of the day from the zenquotes API.
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def A_ ( ):
# Fetches a random quote from the zenquotes API.
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
lowerCAmelCase : List[str] = random_quotes()
pprint.pprint(response)
| 13 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase : Union[str, Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = create_model(
"HTSAT-tiny" , "roberta" , _UpperCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_UpperCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = {}
SCREAMING_SNAKE_CASE_: Tuple = R".*sequential.(\d+).*"
SCREAMING_SNAKE_CASE_: Dict = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE_: Any = key.replace(_UpperCAmelCase , _UpperCAmelCase )
if re.match(_UpperCAmelCase , _UpperCAmelCase ):
# replace sequential layers with list
SCREAMING_SNAKE_CASE_: Optional[int] = re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 )
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(_UpperCAmelCase )//3}.linear." )
elif re.match(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = int(re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE_: Optional[int] = 1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE_: Tuple = value
SCREAMING_SNAKE_CASE_: List[str] = mixed_qkv.size(0 ) // 3
SCREAMING_SNAKE_CASE_: Any = mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE_: Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] = mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE_: str = query_layer
SCREAMING_SNAKE_CASE_: int = key_layer
SCREAMING_SNAKE_CASE_: List[Any] = value_layer
else:
SCREAMING_SNAKE_CASE_: int = value
return model_state_dict
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = init_clap(_UpperCAmelCase , enable_fusion=_UpperCAmelCase )
clap_model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = clap_model.state_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = rename_state_dict(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = ClapConfig()
SCREAMING_SNAKE_CASE_: Tuple = enable_fusion
SCREAMING_SNAKE_CASE_: Tuple = ClapModel(_UpperCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
transformers_config.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
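# ---------------------------------------------------------------------------
# Editor-added sanity sketch of the qkv split performed in the state-dict
# renaming above: a fused tensor of size (3 * d, ...) is cut into equal
# query/key/value thirds along dim 0, so concatenating the thirds must
# recover the original tensor.
_d = 4
_mixed = torch.arange(3 * _d)
_q, _k, _v = _mixed[:_d], _mixed[_d : 2 * _d], _mixed[2 * _d :]
assert torch.equal(torch.cat([_q, _k, _v]), _mixed)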
| 13 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCAmelCase : List[Any] = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : str
_UpperCAmelCase : Optional[str] = None
_UpperCAmelCase : Optional[Union[str, int]] = None
_UpperCAmelCase : Optional[Union[str, int]] = None
_UpperCAmelCase : Optional[Union[str, int]] = None
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = _str_to_version_tuple(self.version_str)
def __repr__( self : List[Any]):
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return self.major, self.minor, self.patch
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
return Version(lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
return other
raise TypeError(F"{other} (type {type(lowerCAmelCase__)}) cannot be compared to version.")
def __eq__( self : Tuple , lowerCAmelCase__ : Optional[int]):
try:
SCREAMING_SNAKE_CASE_: List[str] = self._validate_operand(lowerCAmelCase__)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Optional[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = self._validate_operand(lowerCAmelCase__)
return self.tuple < other.tuple
def __hash__( self : str):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _SCREAMING_SNAKE_CASE ( self : Dict):
return self.version_str
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = _VERSION_REG.match(_UpperCAmelCase )
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(_UpperCAmelCase ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def A_ ( _UpperCAmelCase ):
return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
| 13 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int=37 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=10 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=2 , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_: str = image_size
SCREAMING_SNAKE_CASE_: Tuple = patch_size
SCREAMING_SNAKE_CASE_: int = num_channels
SCREAMING_SNAKE_CASE_: List[str] = is_training
SCREAMING_SNAKE_CASE_: str = use_labels
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Any = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Dict = scope
SCREAMING_SNAKE_CASE_: Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_: List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = num_patches + 1
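# Worked example with the defaults above (image_size=30, patch_size=2): the image splits
# into (30 // 2) ** 2 = 225 patches, so prepending the [CLS] token gives seq_length 226.
assert (30 // 2) ** 2 + 1 == 226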
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Dict = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_: Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Tuple = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = ViTModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_: str = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# which interpolates the pre-trained position embeddings so the model can be used
# on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_: str = ViTModel.from_pretrained("facebook/dino-vits8").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
SCREAMING_SNAKE_CASE_: List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: int = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
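# A hedged sketch (assumed shapes; not the actual ViT code) of the interpolation exercised
# above: dino-vits8 is pre-trained at 224px with 8px patches (a 28x28 grid), and at 480px
# it needs a 60x60 grid. Keep the [CLS] embedding, reshape the patch embeddings to 2D,
# resize bicubically, and flatten back to 60 * 60 + 1 = 3601 positions.
import torch
import torch.nn.functional as F
pos_embed = torch.randn(1, 1 + 28 * 28, 384)
cls_embed, patch_embed = pos_embed[:, :1], pos_embed[:, 1:]
grid = patch_embed.reshape(1, 28, 28, 384).permute(0, 3, 1, 2)  # (1, C, H, W)
grid = F.interpolate(grid, size=(60, 60), mode="bicubic", align_corners=False)
patch_embed = grid.permute(0, 2, 3, 1).reshape(1, 60 * 60, 384)
new_pos_embed = torch.cat([cls_embed, patch_embed], dim=1)
assert new_pos_embed.shape == (1, 3601, 384)  # matches the expected shape in the test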
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: str = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
| 13 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[str] = ['''pixel_values''']
def __init__( self : str , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Tuple , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"shortest_edge": 224}
SCREAMING_SNAKE_CASE_: Any = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE_: str = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ , param_name="crop_size")
SCREAMING_SNAKE_CASE_: Optional[int] = do_resize
SCREAMING_SNAKE_CASE_: List[Any] = size
SCREAMING_SNAKE_CASE_: Optional[Any] = resample
SCREAMING_SNAKE_CASE_: List[Any] = do_center_crop
SCREAMING_SNAKE_CASE_: List[Any] = crop_size
SCREAMING_SNAKE_CASE_: Dict = do_rescale
SCREAMING_SNAKE_CASE_: Dict = rescale_factor
SCREAMING_SNAKE_CASE_: str = do_normalize
SCREAMING_SNAKE_CASE_: List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: List[str] = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Dict , ):
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
SCREAMING_SNAKE_CASE_: List[str] = get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__)
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[int] , ):
SCREAMING_SNAKE_CASE_: Optional[Any] = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[int] , ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[int] , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ):
SCREAMING_SNAKE_CASE_: Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: str = get_size_dict(lowerCAmelCase__ , param_name="size" , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , param_name="crop_size" , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: List[str] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: str = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Tuple = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_: Union[str, Any] = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: int = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: List[Any] = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: Tuple = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
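# Standalone sketch (a simplification of get_resize_output_image_size with
# default_to_square=False, as used by the resize method above): scale so the shorter side
# hits size["shortest_edge"] while preserving the aspect ratio.
def _shortest_edge_output_size(height: int, width: int, shortest_edge: int):
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * shortest_edge / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)
assert _shortest_edge_output_size(480, 640, 224) == (224, 298)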
| 13 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
SCREAMING_SNAKE_CASE_: Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Tuple = bs[:]
SCREAMING_SNAKE_CASE_: str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Optional[int] = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
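# Minimal re-derivation (illustrative; assumes the function above is the standard
# bytes_to_unicode from the RoBERTa tokenizer, per the "Copied from" comment): every one
# of the 256 byte values gets a printable unicode character, printable bytes map to
# themselves, and the remaining bytes are shifted past code point 255.
bs = (
    list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs, n = bs[:], 0
for b in range(2**8):
    if b not in bs:
        bs.append(b)
        cs.append(2**8 + n)
        n += 1
byte_encoder = dict(zip(bs, (chr(c) for c in cs)))
assert len(byte_encoder) == 256 and byte_encoder[ord("a")] == "a" and byte_encoder[0] == chr(256)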
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = set()
SCREAMING_SNAKE_CASE_: Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: Tuple = char
return pairs
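# Example (standalone): on the symbol tuple ("l", "o", "w"), the adjacent bigrams that the
# BPE loop will rank and merge are {("l", "o"), ("o", "w")}.
word = ("l", "o", "w")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert pairs == {("l", "o"), ("o", "w")}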
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
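# One merge step of the loop above, written out standalone (ranks are made up for
# illustration): with ("l", "o") the lowest-ranked bigram of ("l", "o", "w"), the word is
# rewritten to ("lo", "w"); a bigram absent from bpe_ranks gets rank inf and ends the loop.
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}
word = ("l", "o", "w")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
assert bigram == ("l", "o")
word = ("lo", "w")  # result of merging the winning bigram
assert bpe_ranks.get(("lo", "w")) == 1  # the next iteration merges again, yielding ("low",)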
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: lowerCAmelCase__[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
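# Format illustration (assumed ids for the sketch: cls=0, sep=2): a single sequence is
# wrapped as <s> A </s>, and a pair as <s> A </s></s> B </s>, matching the concatenation
# returned above.
cls, sep = [0], [2]
token_ids_a, token_ids_b = [10, 11], [20]
assert cls + token_ids_a + sep == [0, 10, 11, 2]
assert cls + token_ids_a + sep + sep + token_ids_b + sep == [0, 10, 11, 2, 2, 20, 2]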
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
| 13 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : Union[str, Any]):
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
| 13 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only from integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
# We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 13 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = '''bit'''
_UpperCAmelCase : Optional[Any] = ['''preactivation''', '''bottleneck''']
_UpperCAmelCase : List[Any] = ['''SAME''', '''VALID''']
def __init__( self : Tuple , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : int=64 , lowerCAmelCase__ : List[Any]=[256, 512, 1024, 2048] , lowerCAmelCase__ : Dict=[3, 4, 6, 3] , lowerCAmelCase__ : int="preactivation" , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[Any]=32 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : int , ):
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
SCREAMING_SNAKE_CASE_: List[Any] = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported")
SCREAMING_SNAKE_CASE_: Optional[Any] = num_channels
SCREAMING_SNAKE_CASE_: Dict = embedding_size
SCREAMING_SNAKE_CASE_: int = hidden_sizes
SCREAMING_SNAKE_CASE_: Dict = depths
SCREAMING_SNAKE_CASE_: str = layer_type
SCREAMING_SNAKE_CASE_: int = hidden_act
SCREAMING_SNAKE_CASE_: Optional[int] = global_padding
SCREAMING_SNAKE_CASE_: Tuple = num_groups
SCREAMING_SNAKE_CASE_: List[Any] = drop_path_rate
SCREAMING_SNAKE_CASE_: Tuple = embedding_dynamic_padding
SCREAMING_SNAKE_CASE_: List[str] = output_stride
SCREAMING_SNAKE_CASE_: Union[str, Any] = width_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = ["stem"] + [F"stage{idx}" for idx in range(1 , len(lowerCAmelCase__) + 1)]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names)
| 13 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
SCREAMING_SNAKE_CASE_: Tuple = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
| 13 | 1 |
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
SCREAMING_SNAKE_CASE_: Any = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCAmelCase ) )
return round(_UpperCAmelCase , ndigits=2 )
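# Worked example (illustrative cash flows, not from the module): an initial outlay of -100
# followed by inflows of 60 and 60 at a 10% discount rate gives
# -100 / 1.1**0 + 60 / 1.1**1 + 60 / 1.1**2 ≈ 4.13.
assert round(-100 + 60 / 1.1 + 60 / 1.1**2, 2) == 4.13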
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_: Optional[int] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
SCREAMING_SNAKE_CASE_: int = 1 - (matter_density + radiation_density + dark_energy)
SCREAMING_SNAKE_CASE_: Dict = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
SCREAMING_SNAKE_CASE_: Any = hubble_constant * e_a ** (1 / 2)
return hubble
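# The expression above is the standard Friedmann expansion function,
# H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda),
# with Omega_k (curvature) inferred so the densities sum to one; at z = 0 the bracket is
# exactly 1, so H(0) equals H0 (quick check with illustrative densities):
assert abs(68.3 * (1e-4 + 0.3 + 0.0 + (1 - 1e-4 - 0.3)) ** 0.5 - 68.3) < 1e-9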
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase : List[Any] = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 13 | 1 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[Any] = 8
# DPR tok
SCREAMING_SNAKE_CASE_: str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(self.tmpdirname , "dpr_tokenizer")
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
SCREAMING_SNAKE_CASE_: Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
SCREAMING_SNAKE_CASE_: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE_: Optional[int] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE_: Optional[Any] = os.path.join(self.tmpdirname , "bart_tokenizer")
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Tuple = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCAmelCase__) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowerCAmelCase__))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(self.tmpdirname , "rag_tokenizer")
SCREAMING_SNAKE_CASE_: Tuple = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
SCREAMING_SNAKE_CASE_: Union[str, Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(lowerCAmelCase__)
rag_tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = RagTokenizer.from_pretrained(lowerCAmelCase__ , config=lowerCAmelCase__)
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCAmelCase__)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , lowerCAmelCase__)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = RagTokenizer.from_pretrained("facebook/rag-token-nq")
SCREAMING_SNAKE_CASE_: str = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
SCREAMING_SNAKE_CASE_: List[str] = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
SCREAMING_SNAKE_CASE_: Any = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
| 13 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase : int = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : int = """MobileNetV1Config"""
# Base docstring
lowerCAmelCase : List[Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Dict = [1, 1024, 7, 7]
# Image classification docstring
lowerCAmelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Any = """tabby, tabby cat"""
lowerCAmelCase : List[Any] = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: List[str] = {}
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE_: int = model
SCREAMING_SNAKE_CASE_: Dict = "MobilenetV1/Conv2d_0/"
SCREAMING_SNAKE_CASE_: str = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE_: int = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[int] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE_: List[str] = i + 1
SCREAMING_SNAKE_CASE_: Optional[int] = i * 2
SCREAMING_SNAKE_CASE_: Any = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE_: Any = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE_: Any = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: str = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE_: Tuple = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE_: List[str] = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE_: int = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: Optional[int] = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_var
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = "MobilenetV1/Logits/Conv2d_1c_1x1/"
SCREAMING_SNAKE_CASE_: Optional[Any] = model.classifier.weight
SCREAMING_SNAKE_CASE_: Tuple = model.classifier.bias
return tf_to_pt_map
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE_: int = tf.train.list_variables(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE_: Any = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE_: Optional[Any] = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE_: int = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
SCREAMING_SNAKE_CASE_: int = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE_: List[str] = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE_: Any = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase )
tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp" , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp_1" , _UpperCAmelCase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _UpperCAmelCase )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
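# Hedged sketch (not part of the original file) of the two kernel layouts the
# loader above transposes: TF stores regular conv weights as (H, W, in, out)
# and depthwise weights as (H, W, channels, multiplier), while PyTorch expects
# (out, in/groups, H, W).
if __name__ == "__main__":
    import numpy as np
    assert np.transpose(np.zeros((3, 3, 16, 32)), (3, 2, 0, 1)).shape == (32, 16, 3, 3)
    assert np.transpose(np.zeros((3, 3, 16, 1)), (2, 3, 0, 1)).shape == (16, 1, 3, 3)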
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = features.shape[-2:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = conv_layer.stride
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE_: int = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE_: Tuple = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE_: str = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE_: Dict = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE_: str = pad_along_width // 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE_: int = pad_along_height // 2
SCREAMING_SNAKE_CASE_: Tuple = pad_along_height - pad_top
SCREAMING_SNAKE_CASE_: Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , "constant" , 0.0 )
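# Hedged sanity check (not part of the original module) for the "SAME" padding
# rule above: a 7x7 input with stride 2 and kernel 3 has 7 % 2 == 1, so the
# pad amount is max(3 - 1, 0) = 2 per axis, split 1/1 -- giving ceil(7 / 2) == 4
# output rows and columns, as TensorFlow would.
if __name__ == "__main__":
    _conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=0)
    _x = torch.randn(1, 3, 7, 7)
    _y = _conv(nn.functional.pad(_x, (1, 1, 1, 1), "constant", 0.0))
    assert tuple(_y.shape[-2:]) == (4, 4)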
class __lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool or str] = True , ):
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
SCREAMING_SNAKE_CASE_: int = 0 if config.tf_padding else int((kernel_size - 1) / 2)
SCREAMING_SNAKE_CASE_: Union[str, Any] = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
SCREAMING_SNAKE_CASE_: str = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: str = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_: Any = config.hidden_act
else:
SCREAMING_SNAKE_CASE_: int = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : torch.Tensor):
if self.config.tf_padding:
SCREAMING_SNAKE_CASE_: Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution)
SCREAMING_SNAKE_CASE_: Optional[int] = self.convolution(lowerCAmelCase__)
if self.normalization is not None:
SCREAMING_SNAKE_CASE_: int = self.normalization(lowerCAmelCase__)
if self.activation is not None:
SCREAMING_SNAKE_CASE_: List[Any] = self.activation(lowerCAmelCase__)
return features
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[str] = MobileNetVaConfig
_UpperCAmelCase : List[Any] = load_tf_weights_in_mobilenet_va
_UpperCAmelCase : List[Any] = '''mobilenet_v1'''
_UpperCAmelCase : int = '''pixel_values'''
_UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[nn.Linear, nn.Convad]):
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase : Any = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : List[str] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : bool = True):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = config
SCREAMING_SNAKE_CASE_: Union[str, Any] = 32
SCREAMING_SNAKE_CASE_: Dict = max(int(depth * config.depth_multiplier) , config.min_depth)
SCREAMING_SNAKE_CASE_: Tuple = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
SCREAMING_SNAKE_CASE_: Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE_: str = nn.ModuleList()
for i in range(13):
SCREAMING_SNAKE_CASE_: List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE_: str = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ))
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ))
SCREAMING_SNAKE_CASE_: List[str] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str):
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_: Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
SCREAMING_SNAKE_CASE_: Optional[Any] = self.conv_stem(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
SCREAMING_SNAKE_CASE_: Tuple = layer_module(lowerCAmelCase__)
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Optional[int] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE_: int = torch.flatten(self.pooler(lowerCAmelCase__) , start_dim=1)
else:
SCREAMING_SNAKE_CASE_: List[str] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : MobileNetVaConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE_: Dict = MobileNetVaModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE_: str = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.Linear(lowerCAmelCase__ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: List[str] = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_: Tuple = self.classifier(self.dropout(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_: int = "single_label_classification"
else:
SCREAMING_SNAKE_CASE_: str = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_: Dict = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
SCREAMING_SNAKE_CASE_: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_: Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_: Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
if not return_dict:
SCREAMING_SNAKE_CASE_: int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
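# Hedged, self-contained sketch (not part of the original file) of the
# `problem_type` dispatch in the classification head above: integer labels with
# several classes take the cross-entropy path, float one-hot labels the
# multi-label BCE path.
if __name__ == "__main__":
    _logits = torch.randn(2, 5)
    _int_labels = torch.tensor([1, 3])
    print(CrossEntropyLoss()(_logits.view(-1, 5), _int_labels.view(-1)).item())
    _float_labels = torch.zeros(2, 5).scatter_(1, _int_labels.unsqueeze(1), 1.0)
    print(BCEWithLogitsLoss()(_logits, _float_labels).item())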
| 13 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def A_ ( _UpperCAmelCase ):
return 1.0 / (1.0 + np.exp(-_UpperCAmelCase ))
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = np.max(_UpperCAmelCase , axis=-1 , keepdims=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = np.exp(_UpperCAmelCase - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase )
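# Hedged sanity check (not part of the original module): subtracting the
# row-wise max before exponentiating leaves the softmax unchanged while
# avoiding overflow for large logits.
if __name__ == "__main__":
    _big = np.array([[1000.0, 1001.0, 1002.0]])
    _shift = np.exp(_big - np.max(_big, axis=-1, keepdims=True))
    print(_shift / _shift.sum(axis=-1, keepdims=True))  # ~[[0.090, 0.245, 0.665]]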
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = '''sigmoid'''
_UpperCAmelCase : str = '''softmax'''
_UpperCAmelCase : List[Any] = '''none'''
@add_end_docstrings(
UpperCAmelCase_ , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Tuple = ClassificationFunction.NONE
def __init__( self : Union[str, Any] , **lowerCAmelCase__ : int):
super().__init__(**lowerCAmelCase__)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[str]="" , **lowerCAmelCase__ : Union[str, Any]):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE_: int = tokenizer_kwargs
SCREAMING_SNAKE_CASE_: Dict = {}
if hasattr(self.model.config , "return_all_scores") and return_all_scores is None:
SCREAMING_SNAKE_CASE_: List[str] = self.model.config.return_all_scores
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) or top_k is None:
SCREAMING_SNAKE_CASE_: Any = top_k
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , lowerCAmelCase__ , )
if return_all_scores:
SCREAMING_SNAKE_CASE_: int = None
else:
SCREAMING_SNAKE_CASE_: str = 1
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE_: List[Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : int , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE_: Tuple = "top_k" not in kwargs
if isinstance(args[0] , lowerCAmelCase__) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.framework
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
return self.tokenizer(**lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) == 1 and isinstance(inputs[0] , lowerCAmelCase__) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.")
return self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any]):
return self.model(**lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Optional[Any]=True):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE_: Optional[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply") and function_to_apply is None:
SCREAMING_SNAKE_CASE_: Tuple = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE_: List[str] = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE_: List[Any] = model_outputs["logits"][0]
SCREAMING_SNAKE_CASE_: Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE_: List[str] = sigmoid(lowerCAmelCase__)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE_: Tuple = softmax(lowerCAmelCase__)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE_: List[str] = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}")
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE_: Dict = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(lowerCAmelCase__)
]
if not _legacy:
dict_scores.sort(key=lambda lowerCAmelCase__: lowerCAmelCase__["score"] , reverse=lowerCAmelCase__)
if top_k is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] = dict_scores[:top_k]
return dict_scores
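# Hedged usage sketch (not part of the original file); the checkpoint name is
# illustrative, and `top_k=None` returns every label, as documented above.
if __name__ == "__main__":
    from transformers import pipeline
    _clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(_clf("I love this!", top_k=None))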
| 13 |
def A_ ( _UpperCAmelCase , _UpperCAmelCase = False ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = f"Expected string as input, found {type(_UpperCAmelCase )}"
raise ValueError(_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = f"Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}"
raise ValueError(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple = input_str.split("_" )
SCREAMING_SNAKE_CASE_: str = 0 if use_pascal else 1
SCREAMING_SNAKE_CASE_: int = words[start_index:]
SCREAMING_SNAKE_CASE_: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
SCREAMING_SNAKE_CASE_: List[Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
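# Hedged inline check (not part of the original file) mirroring the conversion
# logic above, written out directly because the helper's parameters are
# positional here:
if __name__ == "__main__":
    _words = "some_random_string".split("_")
    assert _words[0] + "".join(w[0].upper() + w[1:] for w in _words[1:]) == "someRandomString"
    assert "".join(w[0].upper() + w[1:] for w in _words) == "SomeRandomString"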
| 13 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCAmelCase : int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = AudioClassificationPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__)
# test with a raw waveform
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.zeros((3_4000,))
SCREAMING_SNAKE_CASE_: List[Any] = np.zeros((1_4000,))
return audio_classifier, [audioa, audio]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = examples
SCREAMING_SNAKE_CASE_: List[str] = audio_classifier(lowerCAmelCase__)
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCAmelCase__ , [
{"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
{"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
] , )
SCREAMING_SNAKE_CASE_: Dict = audio_classifier(lowerCAmelCase__ , top_k=1)
self.assertEqual(
lowerCAmelCase__ , [
{"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
] , )
self.run_torchaudio(lowerCAmelCase__)
@require_torchaudio
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Dict):
import datasets
# test with an audio array loaded from a dataset
SCREAMING_SNAKE_CASE_: Optional[Any] = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
SCREAMING_SNAKE_CASE_: List[Any] = dataset[0]["audio"]["array"]
SCREAMING_SNAKE_CASE_: Tuple = audio_classifier(lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
{"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
{"score": ANY(lowerCAmelCase__), "label": ANY(lowerCAmelCase__)},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = "anton-l/wav2vec2-random-tiny-classifier"
SCREAMING_SNAKE_CASE_: Tuple = pipeline("audio-classification" , model=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = np.ones((8000,))
SCREAMING_SNAKE_CASE_: Optional[int] = audio_classifier(lowerCAmelCase__ , top_k=4)
SCREAMING_SNAKE_CASE_: List[Any] = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
SCREAMING_SNAKE_CASE_: List[Any] = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
SCREAMING_SNAKE_CASE_: int = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE_: str = audio_classifier(lowerCAmelCase__ , top_k=4)
self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
import datasets
SCREAMING_SNAKE_CASE_: Optional[Any] = "superb/wav2vec2-base-superb-ks"
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("audio-classification" , model=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test")
SCREAMING_SNAKE_CASE_: List[Any] = np.array(dataset[3]["speech"] , dtype=np.floataa)
SCREAMING_SNAKE_CASE_: str = audio_classifier(lowerCAmelCase__ , top_k=4)
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=3) , [
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF")
def _SCREAMING_SNAKE_CASE ( self : List[str]):
pass
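# Hedged usage sketch (not part of the original test file); the tiny random
# checkpoint is the one exercised above, so the scores are meaningless but the
# call shape is representative.
if __name__ == "__main__":
    import numpy as np
    from transformers import pipeline
    _clf = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(_clf(np.zeros((16_000,), dtype=np.float32), top_k=2))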
| 13 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def A_ ( _UpperCAmelCase , _UpperCAmelCase=10 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = []
for _ in range(_UpperCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def A_ ( _UpperCAmelCase , _UpperCAmelCase=10 ):
SCREAMING_SNAKE_CASE_: List[str] = []
for step in range(_UpperCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(_UpperCAmelCase , "schedule.bin" )
torch.save(scheduler.state_dict() , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.load(_UpperCAmelCase )
scheduler.load_state_dict(_UpperCAmelCase )
return lrs
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple):
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__))
for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.4, 0.2, -0.5])
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE_: int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0)
for _ in range(100):
SCREAMING_SNAKE_CASE_: Dict = criterion(lowerCAmelCase__ , lowerCAmelCase__)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.tensor([0.4, 0.2, -0.5])
SCREAMING_SNAKE_CASE_: Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE_: int = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase__ , weight_decay=0.0 , relative_step=lowerCAmelCase__ , scale_parameter=lowerCAmelCase__ , warmup_init=lowerCAmelCase__ , )
for _ in range(1000):
SCREAMING_SNAKE_CASE_: List[Any] = criterion(lowerCAmelCase__ , lowerCAmelCase__)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
_UpperCAmelCase : List[Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
_UpperCAmelCase : Optional[Any] = 10
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]=None):
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__))
for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ , msg=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
SCREAMING_SNAKE_CASE_: Dict = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = data
SCREAMING_SNAKE_CASE_: List[Any] = scheduler_func(self.optimizer , **lowerCAmelCase__)
self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
SCREAMING_SNAKE_CASE_: int = unwrap_schedule(lowerCAmelCase__ , self.num_steps)
self.assertListAlmostEqual(
lowerCAmelCase__ , lowerCAmelCase__ , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
SCREAMING_SNAKE_CASE_: List[str] = scheduler_func(self.optimizer , **lowerCAmelCase__)
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase__) # wrap to test picklability of the schedule
SCREAMING_SNAKE_CASE_: Tuple = unwrap_and_save_reload_schedule(lowerCAmelCase__ , self.num_steps)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ , msg=F"failed for {scheduler_func} in save and reload")
class __lowercase :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: List[Any] = fn
def __call__( self : Optional[int] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Tuple):
return self.fn(*lowerCAmelCase__ , **lowerCAmelCase__)
@classmethod
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: str = list(map(self , scheduler.lr_lambdas))
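# Hedged usage sketch (not part of the original test file) of the warmup +
# linear-decay schedule exercised above; assumes torch is available.
if __name__ == "__main__":
    _model = nn.Linear(4, 4)
    _opt = AdamW(_model.parameters(), lr=10.0)
    _sched = get_linear_schedule_with_warmup(_opt, num_warmup_steps=2, num_training_steps=10)
    for _ in range(4):
        _opt.step()
        _sched.step()
    print(_sched.get_last_lr())  # warmed up to 10.0, now decaying: [7.5]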
| 13 | 1 |
import socket
def A_ ( ):
SCREAMING_SNAKE_CASE_: str = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE_: Tuple = socket.gethostname()
SCREAMING_SNAKE_CASE_: str = 1_23_12
sock.connect((host, port) )
sock.send(B"Hello server!" )
with open("Received_file" , "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
SCREAMING_SNAKE_CASE_: List[Any] = sock.recv(10_24 )
if not data:
break
out_file.write(data )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
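# Hedged companion sketch (not part of the original file, and never called
# here): a minimal server that would satisfy the client above. The file name
# is illustrative.
def _demo_server(port: int = 12312, filename: str = "mytext.txt") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()
    server.close()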
| 13 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_UpperCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
_UpperCAmelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , lowerCAmelCase__):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self)
SCREAMING_SNAKE_CASE_: Optional[int] = self.input_schema.copy()
SCREAMING_SNAKE_CASE_: Dict = features[self.audio_column]
SCREAMING_SNAKE_CASE_: int = input_schema
return task_template
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
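# Hedged usage sketch (not part of the original file) of the schemas the task
# template above validates against; `Features`, `Audio`, and `Value` are the
# imports at the top of this module.
if __name__ == "__main__":
    print(Features({"audio": Audio()}))
    print(Features({"transcription": Value("string")}))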
| 13 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "hidden_sizes"))
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "neck_hidden_sizes"))
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_attention_heads"))
class __lowercase :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=13 , lowerCAmelCase__ : Any=32 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : List[str]=640 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Optional[int]="silu" , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Union[str, Any]=32 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : str=10 , lowerCAmelCase__ : Dict=None , ):
SCREAMING_SNAKE_CASE_: Any = parent
SCREAMING_SNAKE_CASE_: List[Any] = batch_size
SCREAMING_SNAKE_CASE_: Tuple = image_size
SCREAMING_SNAKE_CASE_: Tuple = patch_size
SCREAMING_SNAKE_CASE_: List[Any] = num_channels
SCREAMING_SNAKE_CASE_: str = last_hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Dict = hidden_act
SCREAMING_SNAKE_CASE_: Optional[Any] = conv_kernel_size
SCREAMING_SNAKE_CASE_: str = output_stride
SCREAMING_SNAKE_CASE_: Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = classifier_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE_: Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_: Any = num_labels
SCREAMING_SNAKE_CASE_: Tuple = initializer_range
SCREAMING_SNAKE_CASE_: List[str] = scope
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: str = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
SCREAMING_SNAKE_CASE_: List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: List[Any] = MobileViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_: str = MobileViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Any = self.num_labels
SCREAMING_SNAKE_CASE_: List[Any] = MobileViTForSemanticSegmentation(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(lowerCAmelCase__)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = config_and_inputs
SCREAMING_SNAKE_CASE_: int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Any = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : int = False
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = MobileViTModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = MobileViTConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings")
def _SCREAMING_SNAKE_CASE ( self : Dict):
pass
@unittest.skip(reason="MobileViT does not output attentions")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
def check_hidden_states_output(lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[int] = outputs.hidden_states
SCREAMING_SNAKE_CASE_: Any = 5
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for i in range(len(lowerCAmelCase__)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[int] = MobileViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def A_ ( ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[str] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: int = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = torch.tensor([-1.9364, -1.2327, -0.4653]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
SCREAMING_SNAKE_CASE_: Optional[Any] = model.to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=lowerCAmelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
SCREAMING_SNAKE_CASE_: Dict = model.to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
SCREAMING_SNAKE_CASE_: Dict = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_: Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(50, 60)])
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape , lowerCAmelCase__)
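# Hedged usage sketch (not part of the original test file), condensing the slow
# classification test above into standalone inference; assumes torch and the
# vision extras are installed.
if __name__ == "__main__":
    from PIL import Image
    _proc = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    _model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
    _img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    _inputs = _proc(images=_img, return_tensors="pt")
    with torch.no_grad():
        print(_model(**_inputs).logits.argmax(-1))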
| 13 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = 20
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1)
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
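# row 0 is the plain ramp, so top-3 keeps indices 7-9; row 1 has vocab_size
# added to its first half, so its top-3 land at indices 2-4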
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
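# row 0 keeps {0.5, 0.3} since their cumulative mass is exactly 0.8; row 1
# needs three tokens because 0.3 + 0.3 = 0.6 < 0.8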
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
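# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the nucleus (top-p) rule
# that the warper tests above exercise, written out in plain numpy. The helper
# name `nucleus_filter` is hypothetical, not the library API.
import numpy as np

def nucleus_filter(logits: np.ndarray, top_p: float, filter_value: float = -np.inf) -> np.ndarray:
    # Convert logits to probabilities, sort them in descending order, and
    # keep the smallest prefix whose cumulative mass reaches `top_p`.
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    order = np.argsort(-probs, axis=-1)
    sorted_probs = np.take_along_axis(probs, order, axis=-1)
    cumulative = np.cumsum(sorted_probs, axis=-1)
    # A sorted token is dropped once the mass accumulated *before* it already
    # reaches top_p; scatter that mask back to the original token order.
    drop_sorted = (cumulative - sorted_probs) >= top_p
    drop = np.empty_like(drop_sorted)
    np.put_along_axis(drop, order, drop_sorted, axis=-1)
    return np.where(drop, filter_value, logits)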
| 13 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = KandinskyImgaImgPipeline
_UpperCAmelCase : Dict = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
_UpperCAmelCase : Union[str, Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_UpperCAmelCase : Dict = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Tuple = False
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : str):
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE_: Optional[int] = MultilingualCLIP(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : str):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: List[str] = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE_: Union[str, Any] = UNetaDConditionModel(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Any = VQModel(**self.dummy_movq_kwargs)
return model
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: List[Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: Optional[int] = self.dummy_unet
SCREAMING_SNAKE_CASE_: Any = self.dummy_movq
SCREAMING_SNAKE_CASE_: str = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
SCREAMING_SNAKE_CASE_: str = DDIMScheduler(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=0):
SCREAMING_SNAKE_CASE_: Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowerCAmelCase__)
# create init_image
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE_: Any = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("RGB").resize((256, 256))
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: List[str] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: List[Any] = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = "cpu"
SCREAMING_SNAKE_CASE_: Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Optional[Any] = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: List[str] = output.images
SCREAMING_SNAKE_CASE_: Any = pipe(
**self.get_dummy_inputs(lowerCAmelCase__) , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE_: int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: str = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : int):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy")
SCREAMING_SNAKE_CASE_: Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
SCREAMING_SNAKE_CASE_: str = "A red cartoon frog, 4k"
SCREAMING_SNAKE_CASE_: int = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE_: int = pipeline.to(lowerCAmelCase__)
pipeline.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Generator(device="cpu").manual_seed(0)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
SCREAMING_SNAKE_CASE_: Any = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
SCREAMING_SNAKE_CASE_: str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
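# ---------------------------------------------------------------------------
# Sketch of the comparison `assert_mean_pixel_difference` performs above: two
# images count as matching when their mean absolute pixel difference stays
# under a small budget. The helper name and the default budget of 10 (on a
# 0-255 scale) are assumptions for illustration, not the library's values.
def mean_pixel_difference_ok(image, expected, budget: float = 10.0) -> bool:
    diff = np.abs(image.astype(np.float32) - expected.astype(np.float32))
    return float(diff.mean()) < budget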
| 13 |
import math
import sys
def A_ ( _UpperCAmelCase ):
if number != int(_UpperCAmelCase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
SCREAMING_SNAKE_CASE_: List[str] = [-1] * (number + 1)
SCREAMING_SNAKE_CASE_: str = 0
for i in range(1 , number + 1 ):
SCREAMING_SNAKE_CASE_: str = sys.maxsize
SCREAMING_SNAKE_CASE_: List[Any] = int(math.sqrt(_UpperCAmelCase ) )
for j in range(1 , root + 1 ):
SCREAMING_SNAKE_CASE_: List[str] = 1 + answers[i - (j**2)]
SCREAMING_SNAKE_CASE_: Optional[Any] = min(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
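# ---------------------------------------------------------------------------
# Illustrative cross-check (a sketch, separate from the DP above): Lagrange's
# four-square theorem caps the answer at 4, and Legendre's three-square
# theorem says exactly when 4 is needed, so for n >= 1 the result can also be
# derived in O(sqrt(n)) time. The name `min_squares_fast` is hypothetical.
def min_squares_fast(n: int) -> int:
    if math.isqrt(n) ** 2 == n:
        return 1  # n is itself a perfect square
    m = n
    while m % 4 == 0:
        m //= 4
    if m % 8 == 7:
        return 4  # Legendre: n = 4^a * (8b + 7) requires four squares
    for i in range(1, math.isqrt(n) + 1):
        rest = n - i * i
        if math.isqrt(rest) ** 2 == rest:
            return 2  # n is a sum of two squares
    return 3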
| 13 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Tuple = IFInpaintingSuperResolutionPipeline
_UpperCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
_UpperCAmelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _SCREAMING_SNAKE_CASE ( self : str):
return self._get_superresolution_dummy_components()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: str = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _SCREAMING_SNAKE_CASE ( self : Dict):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA")
def _SCREAMING_SNAKE_CASE ( self : Any):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def _SCREAMING_SNAKE_CASE ( self : int):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
self._test_save_load_local()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
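# ---------------------------------------------------------------------------
# Sketch of the seeding pattern `get_dummy_inputs` uses above: MPS does not
# accept a device-bound torch.Generator, so tests fall back to the global CPU
# generator there. Standalone illustration with a hypothetical helper name.
def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global generator; MPS lacks per-device ones
    return torch.Generator(device=device).manual_seed(seed)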
| 13 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
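# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the lazy-import idea behind `_LazyModule`,
# using the module-level __getattr__ hook from PEP 562. Illustrative only:
# the real helper instead replaces sys.modules[__name__] with a proxy module,
# and the attribute map below is a hypothetical stand-in.
import importlib

_LAZY_ATTRS = {"WavLMConfig": ".configuration_wavlm", "WavLMModel": ".modeling_wavlm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")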
| 13 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] = CanineTokenizer
_UpperCAmelCase : Dict = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
super().setUp()
SCREAMING_SNAKE_CASE_: Any = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return CanineTokenizer.from_pretrained("google/canine-s")
def _SCREAMING_SNAKE_CASE ( self : str , **lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 1024
return tokenizer
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = self.canine_tokenizer
SCREAMING_SNAKE_CASE_: Optional[int] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
SCREAMING_SNAKE_CASE_: List[Any] = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt")
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = list(batch.input_ids.numpy()[0])
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertEqual((2, 39) , batch.input_ids.shape)
self.assertEqual((2, 39) , batch.attention_mask.shape)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.canine_tokenizer
SCREAMING_SNAKE_CASE_: Tuple = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt")
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , lowerCAmelCase__)
self.assertIn("attention_mask" , lowerCAmelCase__)
self.assertIn("token_type_ids" , lowerCAmelCase__)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = self.canine_tokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] = [
"What's the weater?",
"It's about 25 degrees.",
]
SCREAMING_SNAKE_CASE_: int = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding="max_length" , truncation=lowerCAmelCase__ , return_tensors="pt")
self.assertEqual(32 , targets["input_ids"].shape[1])
def _SCREAMING_SNAKE_CASE ( self : str):
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
SCREAMING_SNAKE_CASE_: List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[str] = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
shutil.rmtree(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Dict = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[Any] = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
SCREAMING_SNAKE_CASE_: str = chr(0xE007)
additional_special_tokens.append(lowerCAmelCase__)
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
SCREAMING_SNAKE_CASE_: Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertIn(lowerCAmelCase__ , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.get_clean_sequence(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE_: int = 0xE005
SCREAMING_SNAKE_CASE_: List[Any] = chr(lowerCAmelCase__)
tokenizer.add_special_tokens({"cls_token": special_token})
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__) , 1)
SCREAMING_SNAKE_CASE_: str = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , input_encoded + special_token_id)
SCREAMING_SNAKE_CASE_: str = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
self.assertTrue(special_token not in decoded)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
SCREAMING_SNAKE_CASE_: Dict = chr(0xE005)
SCREAMING_SNAKE_CASE_: Tuple = chr(0xE006)
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase__)
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
SCREAMING_SNAKE_CASE_: int = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.tokenize(lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__) , 1)
self.assertEqual(len(lowerCAmelCase__) , 1)
self.assertEqual(token_a[0] , lowerCAmelCase__)
self.assertEqual(token_a[0] , lowerCAmelCase__)
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE_: Optional[Any] = 0xE006
SCREAMING_SNAKE_CASE_: int = chr(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__)
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase__)
tokenizer.from_pretrained(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__ , "special_tokens_map.json") , encoding="utf-8") as json_file:
SCREAMING_SNAKE_CASE_: Optional[int] = json.load(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__ , "tokenizer_config.json") , encoding="utf-8") as json_file:
SCREAMING_SNAKE_CASE_: int = json.load(lowerCAmelCase__)
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE_: List[str] = 0xE006
SCREAMING_SNAKE_CASE_: Union[str, Any] = chr(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = [new_token_a]
SCREAMING_SNAKE_CASE_: List[Any] = [new_token_a]
with open(os.path.join(lowerCAmelCase__ , "special_tokens_map.json") , "w" , encoding="utf-8") as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__ , "tokenizer_config.json") , "w" , encoding="utf-8") as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer_class.from_pretrained(lowerCAmelCase__ , extra_ids=0)
self.assertIn(lowerCAmelCase__ , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a])) , )
SCREAMING_SNAKE_CASE_: int = 0xE007
SCREAMING_SNAKE_CASE_: Dict = chr(lowerCAmelCase__)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_: Optional[Any] = [AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__)]
SCREAMING_SNAKE_CASE_: Tuple = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , extra_ids=0)
self.assertIn(lowerCAmelCase__ , tokenizer.additional_special_tokens)
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a])))
@require_tokenizers
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
SCREAMING_SNAKE_CASE_: Optional[int] = "hello world"
if self.space_between_special_tokens:
SCREAMING_SNAKE_CASE_: Tuple = "[CLS] hello world [SEP]"
else:
SCREAMING_SNAKE_CASE_: List[str] = input
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.decode(lowerCAmelCase__ , spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(lowerCAmelCase__ , [output, output.lower()])
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
SCREAMING_SNAKE_CASE_: Optional[int] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
SCREAMING_SNAKE_CASE_: str = "a"
SCREAMING_SNAKE_CASE_: List[Any] = ord(lowerCAmelCase__)
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + "_id" , lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__ , attr + "_id") , lowerCAmelCase__)
setattr(lowerCAmelCase__ , attr + "_id" , lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(getattr(lowerCAmelCase__ , attr + "_id") , lowerCAmelCase__)
setattr(lowerCAmelCase__ , "additional_special_tokens_ids" , [])
self.assertListEqual(getattr(lowerCAmelCase__ , "additional_special_tokens") , [])
self.assertListEqual(getattr(lowerCAmelCase__ , "additional_special_tokens_ids") , [])
SCREAMING_SNAKE_CASE_: List[Any] = 0xE006
SCREAMING_SNAKE_CASE_: Union[str, Any] = chr(lowerCAmelCase__)
setattr(lowerCAmelCase__ , "additional_special_tokens_ids" , [additional_special_token_id])
self.assertListEqual(getattr(lowerCAmelCase__ , "additional_special_tokens") , [additional_special_token])
self.assertListEqual(getattr(lowerCAmelCase__ , "additional_special_tokens_ids") , [additional_special_token_id])
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : List[str]):
pass
def _SCREAMING_SNAKE_CASE ( self : Any):
pass
def _SCREAMING_SNAKE_CASE ( self : Dict):
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
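# ---------------------------------------------------------------------------
# Sketch of why chr(0xE005) etc. work as special tokens in the tests above:
# CANINE tokenizes text directly to Unicode code points, so the private-use
# area (U+E000..U+F8FF) is free for sentinels. The default ids below are
# illustrative assumptions, not the tokenizer's exact configuration.
def to_code_points(text: str, cls_id: int = 0xE000, sep_id: int = 0xE001) -> list:
    return [cls_id] + [ord(ch) for ch in text] + [sep_id]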
| 13 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__)
return generator, ["Something to write", "Something else"]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there")
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ANY(lowerCAmelCase__)}])
# These are encoder-decoder models, so they don't just append to the incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
SCREAMING_SNAKE_CASE_: List[Any] = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
SCREAMING_SNAKE_CASE_: Dict = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
with self.assertRaises(lowerCAmelCase__):
generator(4)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: Union[str, Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
SCREAMING_SNAKE_CASE_: Union[str, Any] = 3
SCREAMING_SNAKE_CASE_: Any = generator(
"Something there" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Any = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = generator("This is a test" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE_: str = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_: Union[str, Any] = "<pad>"
SCREAMING_SNAKE_CASE_: Tuple = generator(
["This is a test", "This is a second test"] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
| 13 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.functional.normalize(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = nn.functional.normalize(_UpperCAmelCase )
return torch.mm(_UpperCAmelCase , normalized_text_embeds.t() )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = CLIPConfig
_UpperCAmelCase : Optional[int] = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , lowerCAmelCase__ : CLIPConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = CLIPVisionModel(config.vision_config)
SCREAMING_SNAKE_CASE_: List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = nn.Parameter(torch.ones(17 , config.projection_dim) , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = nn.Parameter(torch.ones(17) , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = nn.Parameter(torch.ones(3) , requires_grad=lowerCAmelCase__)
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: List[Any] = self.vision_model(lowerCAmelCase__)[1] # pooled_output
SCREAMING_SNAKE_CASE_: Tuple = self.visual_projection(lowerCAmelCase__)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_: Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: Optional[Any] = cosine_distance(lowerCAmelCase__ , self.concept_embeds).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = image_embeds.shape[0]
for i in range(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0])):
SCREAMING_SNAKE_CASE_: int = special_cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: str = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
SCREAMING_SNAKE_CASE_: Tuple = 0.01
for concept_idx in range(len(cos_dist[0])):
SCREAMING_SNAKE_CASE_: List[str] = cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Any = self.concept_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: Optional[int] = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__)
result.append(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = [len(res["bad_concepts"]) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.vision_model(lowerCAmelCase__)[1] # pooled_output
SCREAMING_SNAKE_CASE_: Tuple = self.visual_projection(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = cosine_distance(lowerCAmelCase__ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_: str = cosine_distance(lowerCAmelCase__ , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: Dict = 0.0
SCREAMING_SNAKE_CASE_: List[str] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: List[str] = torch.any(special_scores > 0 , dim=1)
SCREAMING_SNAKE_CASE_: str = special_care * 0.01
SCREAMING_SNAKE_CASE_: Optional[Any] = special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1])
SCREAMING_SNAKE_CASE_: Any = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: Optional[int] = torch.any(concept_scores > 0 , dim=1)
return images, has_nsfw_concepts
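# ---------------------------------------------------------------------------
# The core decision rule above, reduced to plain numpy (illustrative sketch):
# an image is flagged when any per-concept cosine score clears its threshold
# after the "special care" adjustment has been added.
import numpy as np

def flag_images(cos_dist: np.ndarray, thresholds: np.ndarray, adjustment: np.ndarray) -> np.ndarray:
    # cos_dist: (batch, concepts); thresholds: (concepts,); adjustment: (batch, 1)
    scores = cos_dist - thresholds + adjustment
    return np.any(scores > 0, axis=1)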
| 13 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = [0] * len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = []
SCREAMING_SNAKE_CASE_: str = []
SCREAMING_SNAKE_CASE_: List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCAmelCase )
while queue:
SCREAMING_SNAKE_CASE_: Optional[int] = queue.pop(0 )
cnt += 1
topo.append(_UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_UpperCAmelCase )
if cnt != len(_UpperCAmelCase ):
print("Cycle exists" )
else:
print(_UpperCAmelCase )
# Adjacency List of Graph
lowerCAmelCase : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
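# Illustrative second run (a sketch): adding a back edge 4 -> 0 leaves no
# vertex with in-degree zero, so Kahn's algorithm never drains the queue,
# cnt stays below len(graph), and the cycle is reported instead of an order.
topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [0], 5: []})  # prints "Cycle exists"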
| 13 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase : Dict = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase : str = BartTokenizer
def __init__( self : Any , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Dict="replace" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : Tuple="</s>" , lowerCAmelCase__ : str="<s>" , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Optional[int]="<pad>" , lowerCAmelCase__ : List[str]="<mask>" , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Optional[int]=True , **lowerCAmelCase__ : Optional[int] , ):
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase__) != add_prefix_space:
SCREAMING_SNAKE_CASE_: List[str] = getattr(lowerCAmelCase__ , pre_tok_state.pop("type"))
SCREAMING_SNAKE_CASE_: str = add_prefix_space
SCREAMING_SNAKE_CASE_: Optional[Any] = pre_tok_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE_: Tuple = "post_processor"
SCREAMING_SNAKE_CASE_: Optional[Any] = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__)
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_: Optional[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_: Any = tuple(state["sep"])
if "cls" in state:
SCREAMING_SNAKE_CASE_: Optional[Any] = tuple(state["cls"])
SCREAMING_SNAKE_CASE_: Optional[Any] = False
if state.get("add_prefix_space" , lowerCAmelCase__) != add_prefix_space:
SCREAMING_SNAKE_CASE_: Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE_: List[Any] = True
if state.get("trim_offsets" , lowerCAmelCase__) != trim_offsets:
SCREAMING_SNAKE_CASE_: Optional[int] = trim_offsets
SCREAMING_SNAKE_CASE_: int = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_: List[str] = getattr(lowerCAmelCase__ , state.pop("type"))
SCREAMING_SNAKE_CASE_: str = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : int):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else value
SCREAMING_SNAKE_CASE_: List[str] = value
def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Optional[int] = kwargs.get("is_split_into_words" , lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: int = kwargs.get("is_split_into_words" , lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
SCREAMING_SNAKE_CASE_: Tuple = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
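# ---------------------------------------------------------------------------
# Resulting layout (a sketch of what the two methods above produce, in the
# usual BART/RoBERTa convention):
#   single sequence: <s> A </s>
#   pair:            <s> A </s> </s> B </s>
# with every position in token_type_ids set to 0 in both cases.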
| 13 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : List[str] = 32
def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_: List[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Tuple = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
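
# --- Added illustration (not part of the original script) ---
# The collate_fn above pads to a multiple of 8 (fp16/bf16) or 16 (fp8) so padded
# sequence lengths line up with Tensor Core tile sizes. A dependency-free sketch
# of that rounding rule:
def _round_up(length, multiple):
    # multiple=None means "no extra padding" (the fp32 branch above).
    if multiple is None:
        return length
    return ((length + multiple - 1) // multiple) * multiple


assert _round_up(37, 8) == 40  # fp16/bf16 branch
assert _round_up(37, 16) == 48  # fp8 branch
assert _round_up(37, None) == 37  # fp32 branch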
| 13 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # the "experts" guard keeps expert weights (already renamed above) untouched
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
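
    # --- Added illustration (not part of the original script) ---
    # A toy check of rename_fairseq_keys: an expert weight moves under
    # ffn.experts.expert_<idx>, while the "experts" guard stops .fc1. from
    # being rewritten a second time.
    _toy = {"layers.0.moe_layer.experts.0.fc1.weight": None}
    assert rename_fairseq_keys(_toy, 3) == {"layers.0.ffn.experts.expert_3.fc1.weight": None}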
| 13 |
from collections.abc import Callable
class Heap:
    """A generic heap: a max-heap by default; pass e.g. key=lambda x: -x for a min-heap."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
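
    # --- Added usage example (not part of the original module) ---
    # The heap stores [item, key(value)] pairs and is a max-heap by default;
    # the pos_map index makes update_item/delete_item O(log n).
    demo_heap = Heap()
    demo_heap.insert_item(5, 34)
    demo_heap.insert_item(6, 31)
    demo_heap.insert_item(7, 37)
    assert demo_heap.get_top() == [7, 37]
    demo_heap.update_item(5, 50)  # pos_map lets us re-key item 5 in place
    assert demo_heap.extract_top() == [5, 50]
    assert demo_heap.get_top() == [7, 37]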
| 13 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to pad to round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
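
# --- Added illustration (not part of the original script) ---
# Why training_function divides the loss by gradient_accumulation_steps: with a
# requested batch size of 64 and MAX_GPU_BATCH_SIZE = 16, four micro-batches are
# accumulated, and scaling each micro-loss by 1/4 makes the summed gradient
# match one full-batch step.
requested_batch_size = 64
accumulation_steps = requested_batch_size // MAX_GPU_BATCH_SIZE  # 4
micro_losses = [2.0, 1.0, 3.0, 2.0]
accumulated = sum(loss / accumulation_steps for loss in micro_losses)
assert accumulated == sum(micro_losses) / accumulation_steps  # same as the full-batch mean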
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mt5"""] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mt5"""] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mt5"""] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
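
# --- Added illustration (not part of the original module) ---
# _LazyModule above defers importing submodules until an attribute is first
# accessed. A minimal self-contained sketch of the same idea:
class _TinyLazyModule:
    def __init__(self, import_structure):
        self._import_structure = import_structure

    def __getattr__(self, name):
        # Only runs when `name` is not found normally; import on demand.
        for module_name, names in self._import_structure.items():
            if name in names:
                import importlib

                module = importlib.import_module(module_name)
                return getattr(module, name)
        raise AttributeError(name)


# e.g. _TinyLazyModule({"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'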
| 13 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
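
# --- Added usage note (the paths below are hypothetical placeholders) ---
# Typical CLI invocation of the converter above:
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin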
| 13 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
@property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy")
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
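
# --- Added illustration (not part of the original tests) ---
# get_dummy_inputs above seeds differently on "mps": torch.manual_seed returns
# the default (CPU) generator there, while other devices get a device-local
# torch.Generator. A standalone version of that branch:
def _seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


# _seeded_generator("cpu").initial_seed() == 0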
| 13 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 0 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("Unreachable: the loop above always returns via the 'n' sentinel")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
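
    # --- Added non-interactive demo (build_tree above requires user input) ---
    # Build the tree (1 (2) (3)) by hand and show one traversal's output format:
    demo_root = TreeNode(1)
    demo_root.left, demo_root.right = TreeNode(2), TreeNode(3)
    print(prompt("Added Demo: Pre Order of a Fixed Tree"))
    pre_order(demo_root)  # prints: 1,2,3,
    print(prompt())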
| 1 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap over Node objects with an index map enabling O(log n) decrease_key."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
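
    # --- Added check (not part of the original): verify the min-heap invariant ---
    # After decrease_key, every parent's value must be <= its children's values.
    for _i, _n in enumerate(my_min_heap.heap):
        for _child in (my_min_heap.get_left_child_idx(_i), my_min_heap.get_right_child_idx(_i)):
            if _child < len(my_min_heap.heap):
                assert _n.val <= my_min_heap.heap[_child].val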
| 13 | 0 |
import math
class Graph:
    def __init__(self, n: int = 0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u: int, v: int, w: int):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
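
    # --- Added sanity check (not in the original) ---
    # 1 -> 4 goes 1->3 (5) then 3->4 (6); 0 -> 3 goes 0->2 (9) then 2->3 (7).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16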
| 2 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: Dict = max_length
SCREAMING_SNAKE_CASE_: List[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: str = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , flax_model.params)
SCREAMING_SNAKE_CASE_: List[Any] = flax_model.generate(lowerCAmelCase__).sequences
SCREAMING_SNAKE_CASE_: str = pt_model.generate(torch.tensor(lowerCAmelCase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_: List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Dict = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
SCREAMING_SNAKE_CASE_: Optional[int] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: int = max_length
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = max_length
SCREAMING_SNAKE_CASE_: Any = 0.8
SCREAMING_SNAKE_CASE_: Any = 10
SCREAMING_SNAKE_CASE_: List[str] = 0.3
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: int = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: List[str] = 2
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Tuple = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: List[Any] = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[int] = True
SCREAMING_SNAKE_CASE_: Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
SCREAMING_SNAKE_CASE_: Any = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        input_text = "Hello world"
        input_ids = tokenizer(input_text, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , ) -> Dict:
"""simple docstring"""
A : List[str] = parent
A : int = batch_size
A : Optional[int] = image_size
A : int = num_channels
A : List[Any] = embeddings_size
A : str = hidden_sizes
A : Any = depths
A : List[str] = is_training
A : Any = use_labels
A : Any = hidden_act
A : Tuple = num_labels
A : List[str] = scope
A : Union[str, Any] = len(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Optional[Any] = None
if self.use_labels:
A : str = ids_tensor([self.batch_size] , self.num_labels )
A : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : int = RegNetModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : str = self.num_labels
A : List[Any] = RegNetForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Any = RegNetModelTester(self )
A : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
A : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Any = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
A : Dict = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : Dict = layer_type
A : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Dict = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE )
A : Dict = self.default_image_processor
A : int = prepare_img()
A : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
A : List[str] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
A : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
A : Tuple = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R".*sequential.(\d+).*"
    text_projection_pattern = R".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer)//3}.linear." )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}." , f"_projection.linear{transformers_projection_layer}." )
        if "audio" in key and "qkv" in key:
            # split the fused qkv weight into separate query, key and value entries
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
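# A hypothetical invocation of this script (the flag names are the real ones defined
# above; the script filename and the paths are illustrative placeholders):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-converted \
#       --enable_fusion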
'''simple docstring'''
class PrefixSum:
    def __init__( self , array: list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start: int , end: int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum: int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
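    # A minimal usage sketch (illustrative values; `PrefixSum` is the class above):
    ps = PrefixSum([1, 2, 3, 4])  # prefix sums: [1, 3, 6, 10]
    assert ps.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert ps.contains_sum(7)  # the contiguous subarray [3, 4] sums to 7
    assert not ps.contains_sum(100)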
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int=37 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=10 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=2 , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_: str = image_size
SCREAMING_SNAKE_CASE_: Tuple = patch_size
SCREAMING_SNAKE_CASE_: int = num_channels
SCREAMING_SNAKE_CASE_: List[str] = is_training
SCREAMING_SNAKE_CASE_: str = use_labels
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Any = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Dict = scope
SCREAMING_SNAKE_CASE_: Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_: List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Dict = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = ViTModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_: str = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_: str = ViTModel.from_pretrained("facebook/dino-vits8").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
SCREAMING_SNAKE_CASE_: List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: int = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: str = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
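# Usage note (illustrative): with the `_LazyModule` wiring above, importing light
# members such as the configs stays cheap, and the torch-backed classes are only
# materialized on first attribute access, e.g.:
#
#   from transformers.models.chinese_clip import ChineseCLIPTextConfig  # no torch import yet
#   from transformers.models.chinese_clip import ChineseCLIPModel       # triggers the torch import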
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
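# A quick doctest-style sanity sketch for the two helpers above (illustrative):
#
#   >>> byte_encoder = bytes_to_unicode()
#   >>> len(byte_encoder)  # every byte value maps to a printable unicode symbol
#   256
#   >>> sorted(get_pairs(("l", "o", "w")))  # adjacent symbol pairs BPE may merge
#   [('l', 'o'), ('o', 'w')]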
class LongformerTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
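# A minimal usage sketch (assumes the "allenai/longformer-base-4096" checkpoint
# listed in PRETRAINED_VOCAB_FILES_MAP above is reachable on the Hugging Face Hub):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tokenizer("Hello world").input_ids
#   assert tokenizer.decode(ids, skip_special_tokens=True) == "Hello world"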
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig( PretrainedConfig ):
    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}
    def __init__( self , vocab_size=50_277 , context_length=1_024 , hidden_size=4_096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
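# A short usage sketch (illustrative sizes; the derived attributes follow the
# fallback logic in `__init__` above):
#
#   config = RwkvConfig(vocab_size=50_277, hidden_size=2_048, num_hidden_layers=24)
#   assert config.attention_hidden_size == 2_048  # defaults to hidden_size
#   assert config.intermediate_size == 4 * 2_048  # defaults to 4 * hidden_size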
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids , list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped , completed , reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped , completed , reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped , completed , reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped , completed , reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
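# A compact illustration of the update() protocol exercised above (illustrative):
# each call advances the constraint by one generated token and reports the triple
# (stepped, completed, reset).
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   dc.update(1)  # -> (True, False, False); both branches remain possible
#   dc.update(2)  # -> (True, False, False)
#   dc.update(4)  # -> (True, True, False); the branch [1, 2, 4] is fulfilled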
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
    def __init__( self , vocab_size=250_880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig( OnnxConfigWithPast ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.12' )
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-3
def snake_case__ ( self : Union[str, Any],lowercase_ : "PreTrainedTokenizer",lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional["TensorType"] = None,)-> Mapping[str, Any]:
'''simple docstring'''
A__ = super(lowercase_,self ).generate_dummy_inputs(
lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ )
# We need to order the input in the way they appears in the forward()
A__ = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A__ , A__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A__ = seqlen + 2
A__ = self._config.hidden_size // self.num_attention_heads
A__ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
A__ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
A__ = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers )
]
A__ = common_inputs['attention_mask']
if self.use_past:
A__ = ordered_inputs['attention_mask'].dtype
A__ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowercase_,lowercase_,dtype=lowercase_ )],dim=1 )
return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
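# A minimal sketch of producing ONNX dummy inputs with the config above (the
# checkpoint name comes from the archive map at the top of this file; values
# are illustrative):
#
#   from transformers import AutoTokenizer, TensorType
#   onnx_config = BloomOnnxConfig(BloomConfig(), task="default", use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   sorted(dummy.keys())  # ['attention_mask', 'input_ids', 'past_key_values']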
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name)
            tokenizer = XGLMTokenizer(f.name , keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 13 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
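# Illustrative invocation of the converter (the script name and the checkpoint,
# config, and output paths below are hypothetical placeholders, shown only as a
# usage sketch):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin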
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 8 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Compute the Hubble parameter H(z) from the Friedmann equation:

        H(z) = H0 * sqrt(Omega_r * (1+z)^4 + Omega_m * (1+z)^3 + Omega_k * (1+z)^2 + Omega_Lambda)

    where the curvature density is Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
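# Sanity check for the demo above: at redshift 0 the densities sum to
# E(0) = Omega_r + Omega_m + Omega_k + Omega_Lambda = 1 by construction,
# so the call collapses to H(0) = H0 and should print 68.3.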
| 13 | 0 |
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
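# Behavior sketch for the dummy classes above (assuming the flax backend is
# missing): touching any of them raises immediately instead of failing later
# with an obscure AttributeError, e.g.
#
#   FlaxStableDiffusionPipeline()                      # raises ImportError via requires_backends
#   FlaxStableDiffusionPipeline.from_pretrained("x")   # same; the argument is never used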
| 9 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Builds a map of TF variable names to the corresponding PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # also drop the optimizer-state copies of this variable
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
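# Worked example of the "SAME" padding rule above (values chosen for illustration):
# for a 7x7 input, a 3x3 kernel and stride 2, we have 7 % 2 == 1, so
#   pad_along_height = max(3 - (7 % 2), 0) = 2  ->  pad_top = 1, pad_bottom = 1
# and the padded 9x9 input convolves down to floor((9 - 3) / 2) + 1 = 4 = ceil(7 / 2)
# output rows, matching TensorFlow's SAME semantics even in asymmetric cases.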
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
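# Minimal usage sketch for the classification head above (the checkpoint is taken
# from the docstring constants in this file; `image` is a placeholder for any PIL
# image and the class name follows this module's `Va` naming convention):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits               # shape (1, num_labels)
#   predicted_class = logits.argmax(-1).item()    # e.g. the "tabby, tabby cat" class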
| 13 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as a nested Python list)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = WavaVecaConfig.from_pretrained(model_id)
            feat_extract = WavaVecaFeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
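# What `_check_zero_mean_unit_variance` verifies, in isolation: a toy sketch of
# the zero-mean/unit-variance scaling the feature extractor applies (the 1e-7
# epsilon mirrors the usual numerical-stability term and is an assumption here):
#
#   x = np.random.rand(1_000)
#   x = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
#   assert abs(x.mean()) < 1e-3 and abs(x.var() - 1) < 1e-3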
| 10 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 13 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
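# Typical use of the class above (the checkpoint name is illustrative):
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# The call resolves the "wav2vec2" model type through FEATURE_EXTRACTOR_MAPPING_NAMES
# above and returns a Wav2Vec2FeatureExtractor instance.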
| 11 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
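# Usage sketch: both helpers step a scheduler `num_steps` times and record the
# observed learning rates; the second additionally round-trips the scheduler state
# through save/load halfway through, so the two traces are expected to be identical.
#
#   lrs = unwrap_schedule(get_constant_schedule(optimizer), num_steps=10)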
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # calculate loss, backprop, update weights
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 13 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    '''
    Sorts the first n elements of the collection in ascending order
    via recursive insertion sort.
    '''
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    '''Bubbles the element at index - 1 forward until it is in order.'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
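# Worked trace (hand-checked against the functions above):
#   rec_insertion_sort([5, 3, 1], 3)
#     insert_next(a, 2): [5, 3, 1] -> [5, 1, 3]
#     insert_next(a, 1): [5, 1, 3] -> [1, 5, 3] -> [1, 3, 5]
#   result: [1, 3, 5]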
| 12 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # bypass the frozen dataclass to swap in the aligned schema
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
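# Usage sketch (hypothetical dataset features): aligning the template lets the
# "audio" column inherit the dataset's concrete Audio type, e.g. its sampling rate.
#
#   feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   template = AutomaticSpeechRecognition().align_with_features(feats)
#   template.column_mapping   # {"audio": "audio", "transcription": "transcription"}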
| 13 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """
    Creates a set of `DataLoader`s for one fold of the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=lowercase_ , default=3 , help='''The number of splits to perform across the dataset''' )
A__ = parser.parse_args()
A__ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 14 |
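# A minimal, self-contained sketch of the soft-voting rule the k-fold script
# above applies to its per-fold test logits (stack, average, argmax). The
# shapes and fold count here are illustrative, not taken from the script.
import torch

num_folds, num_examples, num_labels = 3, 8, 2
fold_logits = [torch.randn(num_examples, num_labels) for _ in range(num_folds)]

# Stack to (num_folds, num_examples, num_labels), average over folds, then
# pick the most likely class per example.
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)
predictions = ensembled.argmax(dim=-1)
assert predictions.shape == (num_examples,)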
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = 20
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1)
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 13 | 0 |
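# A tiny sketch of what the temperature warper exercised above does to a
# distribution: dividing logits by T < 1 sharpens the softmax, T > 1 flattens
# it. The logits below are illustrative.
import jax
import jax.numpy as jnp

logits = jnp.array([2.0, 1.0, 0.5, 0.1])
base = jax.nn.softmax(logits)
sharp = jax.nn.softmax(logits / 0.5)   # T < 1: peaks grow, valleys shrink
smooth = jax.nn.softmax(logits / 1.3)  # T > 1: distribution flattens
assert sharp.max() > base.max() > smooth.max()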
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
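# A minimal sketch of the lazy-import pattern that _LazyModule implements,
# written here with PEP 562 module-level __getattr__ rather than transformers'
# actual class; the submodule map below is hypothetical.
import importlib

_import_structure = {"math_utils": ["fast_sqrt"]}  # hypothetical submodule -> names

def __getattr__(name):
    # Resolve the attribute to its submodule on first access only.
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")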
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (bottom-up DP)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 13 | 0 |
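# Quick sanity checks for the DP above (assuming the module is importable):
# 12 = 4 + 4 + 4 needs three squares, 13 = 4 + 9 needs two.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2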
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # Broadcast a scalar to a pair; iterables (e.g. an (H, W) image size) pass through.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class __A :
'''simple docstring'''
def UpperCAmelCase ( self : Any ,_snake_case : Optional[Any] ,_snake_case : Tuple ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : Optional[Any] ,_snake_case : Dict ,_snake_case : Optional[Any]=None ,**_snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case ,_snake_case )
lowercase__ : Tuple = TFVisionTextDualEncoderModel(_snake_case )
lowercase__ : Union[str, Any] = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : Any ,_snake_case : Any ,_snake_case : int ,_snake_case : List[str]=None ,**_snake_case : str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : str = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : Dict = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Optional[Any] ,_snake_case : List[Any] ,_snake_case : str=None ,**_snake_case : str ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : str = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
lowercase__ : Tuple = model(input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case )
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            # Outputs must match before and after the save/load round-trip.
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : str ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : Optional[int]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : Dict = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : Optional[Any] = model(
input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case )
lowercase__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ : Tuple = to_atuple(vision_model.config.image_size )
lowercase__ : int = to_atuple(vision_model.config.patch_size )
lowercase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : np.ndarray ,_snake_case : np.ndarray ,_snake_case : float ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case ,_snake_case ,f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            # Outputs must match before and after the save/load round-trip.
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-random-bert''' )
lowercase__ : Any = 13
lowercase__ : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Union[str, Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : int = random_attention_mask([batch_size, 4] )
lowercase__ : int = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = TFViTModel(_snake_case ,name='''vision_model''' )
lowercase__ : Optional[int] = TFBertModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' ,'''hf-internal-testing/tiny-random-roberta''' )
lowercase__ : Optional[Any] = 13
lowercase__ : Union[str, Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Tuple = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : Tuple = random_attention_mask([batch_size, 4] )
lowercase__ : str = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : Union[str, Any] ,_snake_case : Optional[int] ,_snake_case : Dict=None ,**_snake_case : str ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.get_vision_text_model(_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=_snake_case ,text_model=_snake_case )
lowercase__ : str = model(
input_ids=_snake_case ,pixel_values=_snake_case ,attention_mask=_snake_case ,output_attentions=_snake_case )
lowercase__ : int = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) ,vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
lowercase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowercase__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : List[Any] ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = TFDeiTModel(_snake_case ,name='''vision_model''' )
lowercase__ : int = TFRobertaModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' ,'''hf-internal-testing/tiny-random-bert''' )
lowercase__ : List[str] = 13
lowercase__ : List[str] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowercase__ : Any = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
lowercase__ : Dict = random_attention_mask([batch_size, 4] )
lowercase__ : Dict = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ,_snake_case : int ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = TFCLIPVisionModel(_snake_case ,name='''vision_model''' )
lowercase__ : Optional[int] = TFBertModel(_snake_case ,name='''text_model''' )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 ,from_pt=_snake_case )
lowercase__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowercase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Any = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=_snake_case ,padding=_snake_case ,return_tensors='''np''' )
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
lowercase__ : List[Any] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,_snake_case ,atol=1e-3 ) )
| 16 |
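# A sketch of the CLIP-style similarity the slow test above checks:
# logits_per_image is a (num_images, num_texts) matrix of scaled cosine
# similarities, and logits_per_text is simply its transpose. Sizes are toy.
import numpy as np

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(1, 4))  # 1 image, 4-dim projection
text_embeds = rng.normal(size=(2, 4))   # 2 captions

image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
logit_scale = 1.0  # matches logit_scale_init_value in the test above
logits_per_image = logit_scale * image_embeds @ text_embeds.T
logits_per_text = logits_per_image.T

assert logits_per_image.shape == (1, 2)  # (num_images, num_texts)
assert logits_per_text.shape == (2, 1)   # (num_texts, num_images)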
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _A ( ) -> tuple[list[int], int]:
'''simple docstring'''
__lowercase = [randint(-1000, 1000) for i in range(10)]
__lowercase = randint(-5000, 5000)
return (arr, r)
_a = make_dataset()
def _A ( UpperCamelCase_ : list[int], UpperCamelCase_ : int) -> tuple[int, ...]:
'''simple docstring'''
for triplet in permutations(UpperCamelCase_, 3):
if sum(UpperCamelCase_) == target:
return tuple(sorted(UpperCamelCase_))
return (0, 0, 0)
def _A ( UpperCamelCase_ : list[int], UpperCamelCase_ : int) -> tuple[int, int, int]:
'''simple docstring'''
arr.sort()
__lowercase = len(UpperCamelCase_)
for i in range(n - 1):
__lowercase ,__lowercase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def _A ( ) -> tuple[float, float]:
'''simple docstring'''
__lowercase = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
__lowercase = "\ntriplet_sum1(*dataset)\n"
__lowercase = "\ntriplet_sum2(*dataset)\n"
__lowercase = repeat(setup=UpperCamelCase_, stmt=UpperCamelCase_, repeat=5, number=10000)
__lowercase = repeat(setup=UpperCamelCase_, stmt=UpperCamelCase_, repeat=5, number=10000)
return (min(UpperCamelCase_), min(UpperCamelCase_))
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = solution_times()
print(F"The time for naive implementation is {times[0]}.")
print(F"The time for optimized implementation is {times[1]}.")
| 17 |
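# A worked trace of the two-pointer scan triplet_sum2 performs, for
# arr = [-1, 1, 3, 4, 5] and target = 8 with i fixed at arr[0] = -1:
#
#   left=1 (1), right=4 (5): -1 + 1 + 5 = 5  < 8  -> left += 1
#   left=2 (3), right=4 (5): -1 + 3 + 5 = 7  < 8  -> left += 1
#   left=3 (4), right=4 (5): -1 + 4 + 5 = 8 == 8  -> found (-1, 4, 5)
#
# Assuming the module above is importable, this is checkable directly:
assert triplet_sum2([-1, 1, 3, 4, 5], 8) == (-1, 4, 5)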
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__)
return generator, ["Something to write", "Something else"]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there")
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ANY(lowerCAmelCase__)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
SCREAMING_SNAKE_CASE_: List[Any] = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
SCREAMING_SNAKE_CASE_: Dict = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
with self.assertRaises(lowerCAmelCase__):
generator(4)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: Union[str, Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
SCREAMING_SNAKE_CASE_: Union[str, Any] = 3
SCREAMING_SNAKE_CASE_: Any = generator(
"Something there" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Any = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = generator("This is a test" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE_: str = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_: Union[str, Any] = "<pad>"
SCREAMING_SNAKE_CASE_: Tuple = generator(
["This is a test", "This is a second test"] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
| 13 | 0 |
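# Minimal usage sketch of the pipeline exercised above, using the same tiny
# checkpoint; do_sample=False keeps the output reproducible. The random tiny
# model is expected to emit an empty string, as the test asserts.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # [{'generated_text': ''}]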
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build parameterized.named_parameters entries for each dataset/config pair."""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 18 |
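# A sketch of the streaming path the last test asserts: streaming=True yields
# records lazily instead of downloading and preparing the whole dataset.
# Whether a given wikipedia config still streams cleanly depends on upstream
# availability, so treat this as illustrative.
from datasets import load_dataset

ds = load_dataset("wikipedia", "20220301.simple", split="train", streaming=True)
print(next(iter(ds)))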
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with vertices that have no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 13 | 0 |
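# Sanity-check sketch: for the adjacency list above, the printed ordering
# [0, 1, 2, 3, 4, 5] is a valid topological sort, i.e. every edge (u, v)
# has u positioned before v.
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
order = [0, 1, 2, 3, 4, 5]
position = {v: i for i, v in enumerate(order)}
assert all(position[u] < position[v] for u in graph for v in graph[u])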
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase = None , **lowercase , ) -> None:
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCamelCase_ = 7
lowerCamelCase_ = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowerCamelCase_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCamelCase_ = len(self.sp_model )
lowerCamelCase_ = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowercase )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
lowerCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> str:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCamelCase_ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase ))
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase ))
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = "".join(lowercase ).replace(lowercase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 19 |
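# A small sketch of the fairseq/sentencepiece id alignment documented in the
# comments above: spm ids shift by +1 so positions 0-3 can hold fairseq's
# <s>/<pad>/</s>/<unk> specials, and spm id 0 (its <unk>) maps to fairseq's.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_to_fairseq(spm_id: int) -> int:
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_to_fairseq(3) == 4  # spm "," at 3 -> fairseq "," at 4
assert spm_to_fairseq(0) == 3  # spm <unk> -> fairseq <unk>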
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_: List[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Tuple = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: List[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE_: List[str] = None
return tokenizer.pad(
_UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Dict = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == "fp8") , )
return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
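
# A minimal standalone sketch (not part of the script above) of the padding rule
# used in the collate function: mixed-precision matmul kernels prefer sequence
# lengths that are multiples of 8 (fp16/bf16) or 16 (fp8). `mixed_precision`
# stands in for `accelerator.mixed_precision`.
def pad_multiple_for(mixed_precision: str):
    if mixed_precision == "fp8":
        return 16
    if mixed_precision != "no":
        return 8
    return None


assert pad_multiple_for("fp8") == 16
assert pad_multiple_for("bf16") == 8
assert pad_multiple_for("no") is None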
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
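
# Quick sanity checks for the memoized edit distance above; with functools.cache
# the recursion runs in O(len(word1) * len(word2)) time.
assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3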
from collections.abc import Callable


class Heap:
    """
    A generic Heap class. By default it behaves as a max-heap; it can be used
    as a min-heap by passing key=lambda x: -x.
    """

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int):
        """Performs the changes required to swap two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index of a valid parent as per the desired ordering among
        the given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int):
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int):
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item [item, score] from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns and removes the top item [item, score] from the heap if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """
    >>> h = Heap()  # Max-heap by default
    >>> h.insert_item(5, 34)
    >>> h.insert_item(6, 31)
    >>> h.insert_item(7, 37)
    >>> h.get_top()
    [7, 37]
    >>> h.extract_top()
    [7, 37]
    >>> h.get_top()
    [5, 34]
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
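
# Usage sketch: with a negating key the max-heap above acts as a min-heap.
# Note that get_top() reports the transformed score, i.e. key(value).
min_heap = Heap(key=lambda x: -x)
min_heap.insert_item(5, 34)
min_heap.insert_item(6, 31)
min_heap.insert_item(7, 37)
assert min_heap.get_top() == [6, -31]
min_heap.update_item(6, 50)  # 6 is no longer the smallest
assert min_heap.get_top() == [5, -34]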
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the monitored validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
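
# Sketch: what the filename template in get_checkpoint_callback expands to.
# Lightning substitutes the monitored metric and step count at save time.
template = "{val_avg_rouge2:.4f}-{step_count}"
assert template.format(val_avg_rouge2=21.51837, step_count=1500) == "21.5184-1500"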
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = [
        "FlaxMT5EncoderModel",
        "FlaxMT5ForConditionalGeneration",
        "FlaxMT5Model",
    ]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
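
# The try/except OptionalDependencyNotAvailable blocks above are transformers'
# internal guard pattern. A generic, standard-library-only sketch of the same
# idea (names below are illustrative, not transformers API):
import importlib.util


def backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None


_structure = {}
if backend_available("torch"):
    _structure["modeling"] = ["MyTorchModel"]  # hypothetical module contents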
"""Get the site emails from a URL."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        # Store the urls visited.
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor a bare '#', process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Get the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return all e-mail addresses of the given domain found on the page and its links."""
    # Get the base domain from the url.
    domain = get_domain_name(url)

    # Initialize the parser.
    parser = Parser(domain)

    try:
        # Open URL.
        r = requests.get(url)

        # Pass the raw HTML to the parser to get links.
        parser.feed(r.text)

        # Get links and loop through.
        valid_emails = set()
        for link in parser.urls:
            # Open each discovered URL.
            try:
                read = requests.get(link)
                # Get the valid emails.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # Add them to the set (deduplicates automatically).
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
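
# Offline sanity checks for the URL helpers above:
assert get_sub_domain_name("https://a.b.c.d/e/f?g=h#k") == "a.b.c.d"
assert get_domain_name("https://a.b.c.d/e/f?g=h#k") == "c.d"  # keeps the last two labels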
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
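
# `assert_mean_pixel_difference` (imported above) compares images by mean
# absolute error. A minimal stand-in, assuming equal-shape uint8-like arrays:
import numpy as np


def mean_pixel_difference(image, expected):
    return np.abs(image.astype(np.float32) - expected.astype(np.float32)).mean()


_a = np.zeros((4, 4, 3), dtype=np.uint8)
_b = np.full((4, 4, 3), 2, dtype=np.uint8)
assert mean_pixel_difference(_a, _b) == 2.0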
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """
    Probabilistic (Miller-Rabin) primality check, useful for big numbers.
    If n is composite, the chance of a false positive is at most 1 / 4**prec.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Remove all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
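
# Quick checks for the three implementations above:
for fn in (is_pangram, is_pangram_faster, is_pangram_fastest):
    assert fn("The quick brown fox jumps over the lazy dog") is True
    assert fn("My name is Unknown") is False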
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap with a decrease-key operation in O(log n) time."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# USAGE

r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate a Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
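
# A small validator for the structure above: every parent must compare <= its
# children (Node.__lt__ orders by val).
def is_min_heap(heap: MinHeap) -> bool:
    arr = heap.heap
    return all(
        not (arr[child] < arr[parent])
        for parent in range(len(arr))
        for child in (2 * parent + 1, 2 * parent + 2)
        if child < len(arr)
    )


assert is_min_heap(my_min_heap)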
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
"""do_convert_rgb""": True,
}
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Dict = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = """Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ : Tuple = processor(text=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = """Alexandra,T-shirt的价格是15便士。"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
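
# prepare_image_inputs above converts channel-first arrays (C, H, W) into PIL
# images via np.moveaxis; the axis shuffle in isolation:
import numpy as np

_chw = np.zeros((3, 30, 400), dtype=np.uint8)
assert np.moveaxis(_chw, 0, -1).shape == (30, 400, 3)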
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
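
# The jit checks above follow a standard JAX pattern: compile the function once
# with jax.jit, then verify the compiled output matches eager execution. A
# generic, model-free sketch (requires jax to be installed):
import jax
import jax.numpy as _jnp


def _double(x):
    return x * 2


assert (jax.jit(_double)(_jnp.arange(4)) == _double(_jnp.arange(4))).all()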
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[str]:
_A : Optional[Any] = parent
_A : int = batch_size
_A : List[Any] = seq_length
_A : List[Any] = is_training
_A : str = use_input_mask
_A : List[Any] = use_token_type_ids
_A : Any = use_labels
_A : Dict = vocab_size
_A : List[str] = hidden_size
_A : Optional[int] = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Tuple = intermediate_size
_A : Optional[int] = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : Any = type_vocab_size
_A : Optional[Any] = type_sequence_label_size
_A : Optional[Any] = initializer_range
_A : Any = num_labels
_A : int = num_choices
_A : Any = scope
def a__ ( self ) -> List[Any]:
_A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Union[str, Any] = None
if self.use_input_mask:
_A : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_A : Tuple = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Dict = None
_A : List[Any] = None
_A : Dict = None
if self.use_labels:
_A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_A : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_A : Optional[int] = LlamaModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a )
_A : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
_A : Union[str, Any] = True
_A : Any = LlamaModel(_a )
model.to(_a )
model.eval()
_A : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
_A : Optional[int] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
_A : Optional[Any] = True
_A : Tuple = True
_A : str = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
_A : Union[str, Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
_A : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
_A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> int:
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_a = (LlamaForCausalLM,) if is_torch_available() else ()
_a = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = LlamaModelTester(self )
_A : Dict = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Optional[Any] = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Optional[Any] = input_dict["""input_ids"""]
_A : Tuple = input_ids.ne(1 ).to(_a )
_A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Tuple = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Tuple:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : int = 3
_A : Union[str, Any] = """single_label_classification"""
_A : Union[str, Any] = input_dict["""input_ids"""]
_A : int = input_ids.ne(1 ).to(_a )
_A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Optional[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> str:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Tuple = """multi_label_classification"""
_A : List[str] = input_dict["""input_ids"""]
_A : Union[str, Any] = input_ids.ne(1 ).to(_a )
_A : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Tuple = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def a__ ( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def a__ ( self , _a ) -> Any:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = ids_tensor([1, 10] , config.vocab_size )
_A : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : str = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
_A : Dict = original_model(_a ).last_hidden_state
_A : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
_A : Optional[Any] = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
_A : Tuple = scaled_model(_a ).last_hidden_state
_A : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Dict:
_A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_A : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : Any = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Any:
_A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
_A : Dict = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
_A : Optional[int] = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Tuple = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor(_a ) )
_A : Tuple = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# fmt: off
_A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def a__ ( self ) -> Any:
_A : Any = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_A : List[Any] = """Simply put, the theory of relativity states that """
_A : Dict = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_A : List[str] = tokenizer.encode(_a , return_tensors="""pt""" )
_A : Optional[Any] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_a )
# greedy generation outputs
_A : Optional[Any] = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a )
_A : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
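# Illustrative-only sketch (not part of the test suite) of the rope_scaling
# configuration exercised by the parameterized scaling test above; the tiny
# config sizes and the helper name are arbitrary choices for the example.
def _demo_rope_scaling(scaling_type: str = "dynamic", factor: float = 10.0):
    config = LlamaConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    config.rope_scaling = {"type": scaling_type, "factor": factor}  # same dict shape as in the test
    return LlamaModel(config)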
| 26 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase : Union[str, Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = R".*sequential.(\d+).*"
    text_projection_pattern = R".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, projection index 0 maps to linear1 and the rest to linear2
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split the fused qkv projection into separate query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
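# Minimal sketch (illustrative helper, not part of the conversion script) of the
# fused-qkv split performed in rename_state_dict above: a stacked projection with
# 3 * dim rows is cut into equal query/key/value thirds. The sizes are arbitrary.
def _demo_qkv_split():
    mixed_qkv = torch.arange(12.0).reshape(12, 1)  # pretend 3 * dim rows with dim = 4
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert query.shape == key.shape == value.shape == (4, 1)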
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowercase : Union[str, Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
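# A stripped-down illustration (simplified, no caching or error handling; the class
# name here is invented) of what the _LazyModule indirection above buys: submodules
# are imported only when one of their attributes is first accessed, which keeps the
# top-level package import cheap.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")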
| 27 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int=37 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=10 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=2 , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_: str = image_size
SCREAMING_SNAKE_CASE_: Tuple = patch_size
SCREAMING_SNAKE_CASE_: int = num_channels
SCREAMING_SNAKE_CASE_: List[str] = is_training
SCREAMING_SNAKE_CASE_: str = use_labels
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Any = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Dict = scope
SCREAMING_SNAKE_CASE_: Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_: List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Dict = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    pixel_values,
    labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Tuple = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = ViTModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_: str = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_: str = ViTModel.from_pretrained("facebook/dino-vits8").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
SCREAMING_SNAKE_CASE_: List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: int = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: str = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
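# Worked example (standalone, illustrative) of the sequence-length arithmetic used
# throughout this file: ViT sees (image_size // patch_size) ** 2 patches plus one
# [CLS] token, which is also why the 480px DINO test above expects 3601 positions.
def _vit_seq_len(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert _vit_seq_len(480, 8) == 3601  # facebook/dino-vits8 at 480px
assert _vit_seq_len(224, 16) == 197  # standard ViT-Base input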
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 100_0001
i = 2
while i * i <= 100_0000:
    if sieve[i]:
        for j in range(i * i, 100_0001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Look up primality of n in the precomputed sieve (valid for n <= 1_000_000)."""
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True if any decimal digit of n is even."""
    return any(digit in '02468' for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes below limit: primes whose every digit rotation is also prime."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def count_circular_primes() -> int:
    """Count the circular primes below one million."""
    return len(find_circular_primes())
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
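# Quick sanity check (illustrative) on the helpers above: 197 is a circular prime
# because 197, 971 and 719 are all prime, and exactly 13 circular primes lie below 100.
assert find_circular_primes(100) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]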
| 28 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
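# Sanity check (illustrative) of the mapping built above: it is a bijection over all
# 256 byte values, with printable bytes left unchanged and the remaining bytes shifted
# into unused codepoints so that merge files never contain whitespace or control bytes.
_byte_map = bytes_to_unicode()
assert len(_byte_map) == 256
assert _byte_map[ord("A")] == "A"  # printable bytes map to themselves
assert _byte_map[0] == chr(256)  # the first non-printable byte is remapped past the byte range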
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
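# For example, the symbol tuple for "hello" yields the adjacent pairs that BPE ranks
# against its merge table (illustrative check):
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}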
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
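# A standalone restatement (illustrative helper, not part of the tokenizer API) of the
# prefix-space rule implemented in the method above: a leading space is added only when
# the text does not already start with whitespace and either add_prefix_space is set or
# the input is pre-split into words.
def _maybe_add_prefix_space(text: str, add_prefix_space: bool, is_split_into_words: bool = False) -> str:
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        return " " + text
    return text

assert _maybe_add_prefix_space("world", add_prefix_space=True) == " world"
assert _maybe_add_prefix_space(" world", add_prefix_space=True) == " world"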
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
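# Condensed recap (illustrative, mirrors the assertions above): update() advances every
# branch still consistent with the tokens seen so far, and the constraint completes as
# soon as any one full branch has been generated.
def _demo_disjunctive_constraint():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    assert completed and dc.current_seq == [1, 2, 4]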
| 13 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    '''Convert a T5X checkpoint into a Flax LongT5/T5 model and save it to disk.'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
lowercase_ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase_ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
    '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with an'''
    ''' `encoder_attention_type` attribute whose value is one of [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
lowercase_ = F'''layers_{str(snake_case__ )}'''
# Self-Attention
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase_ = flax_model.params['''encoder''']['''block'''][str(snake_case__ )]['''layer''']
lowercase_ = tax_attention_key
lowercase_ = tax_attention_out
lowercase_ = tax_attention_query
lowercase_ = tax_attention_value
lowercase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_global_layer_norm
if split_mlp_wi:
lowercase_ = tax_mlp_wi_a
lowercase_ = tax_mlp_wi_a
else:
lowercase_ = tax_mlp_wi
lowercase_ = tax_mlp_wo
lowercase_ = tax_mlp_layer_norm
lowercase_ = flax_model_encoder_layer_block
# Only for layer 0:
lowercase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_encoder_global_rel_embedding
# Assigning
lowercase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowercase_ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowercase_ = F'''layers_{str(snake_case__ )}'''
# Self-Attention
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowercase_ = tax_enc_dec_attention_module['''key''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''out''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''query''']['''kernel''']
lowercase_ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowercase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
lowercase_ = flax_model.params['''decoder''']['''block'''][str(snake_case__ )]['''layer''']
lowercase_ = tax_attention_key
lowercase_ = tax_attention_out
lowercase_ = tax_attention_query
lowercase_ = tax_attention_value
lowercase_ = tax_pre_attention_layer_norm
lowercase_ = tax_enc_dec_attention_key
lowercase_ = tax_enc_dec_attention_out
lowercase_ = tax_enc_dec_attention_query
lowercase_ = tax_enc_dec_attention_value
lowercase_ = tax_cross_layer_norm
if split_mlp_wi:
lowercase_ = tax_mlp_wi_a
lowercase_ = tax_mlp_wi_a
else:
lowercase_ = tax_mlp_wi
lowercase_ = tax_mlp_wo
lowercase_ = txa_mlp_layer_norm
lowercase_ = flax_model_decoder_layer_block
# Decoder Normalization
lowercase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowercase_ = txa_decoder_norm
# Only for layer 0:
lowercase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowercase_ = tax_decoder_rel_embedding
# Token Embeddings
lowercase_ = tax_model['''target''']['''token_embedder''']['''embedding''']
lowercase_ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase_ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(snake_case__ )
print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__a = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
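# Minimal stand-in (toy dict, not a real checkpoint) for the `split_mlp_wi` probe at the
# top of the conversion: gated T5 v1.1 / LongT5 MLPs carry two input projections, wi_0
# and wi_1, instead of a single wi, and the probe just inspects the parameter tree.
def _demo_split_mlp_wi_probe() -> bool:
    fake_ckpt = {"target": {"encoder": {"layers_0": {"mlp": {"wi_0": {}, "wi_1": {}}}}}}
    return "wi_0" in fake_ckpt["target"]["encoder"]["layers_0"]["mlp"]

assert _demo_split_mlp_wi_probe()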
| 30 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_rust(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__SCREAMING_SNAKE_CASE : Any = """bert-base-cased"""
__SCREAMING_SNAKE_CASE : Any = """fp16"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """bf16"""
__SCREAMING_SNAKE_CASE : Dict = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy  # assumed env key for the strategy name
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
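# The tests above all follow the same pattern: copy the single-process
# `self.dist_env`, override one FSDP_* environment variable, and check that a
# freshly constructed FullyShardedDataParallelPlugin picks the value up.
# Minimal standalone sketch (same env keys as above; ShardingStrategy(1) is
# FULL_SHARD in torch's FSDP; the full dist_env may also be required):
#
#     env = {"ACCELERATE_USE_FSDP": "true", "FSDP_SHARDING_STRATEGY": "1"}
#     with mockenv_context(**env):
#         assert FullyShardedDataParallelPlugin().sharding_strategy == ShardingStrategy.FULL_SHARD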
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    # entries 1..3 of this list are the relative densities, which must lie in [0, 1]
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
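# Quick sanity check (illustrative): in a flat universe the relative densities
# sum to one, so E(z=0) = 1 and the function returns the Hubble constant itself:
# hubble_parameter(68.3, 1e-4, 0.3, 0.6999, 0) == 68.3.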
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch (TF variable names follow TF-Slim conventions)."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # drop the weight and its optimizer/EMA shadow variables from the bookkeeping dict
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Pad the input like TensorFlow's "SAME" padding mode for the given convolution."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
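# Worked example for apply_tf_padding: a 7x7 feature map with stride 2 and kernel 3
# gives 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2, split as pad_top = 1
# and pad_bottom = 1 (likewise for width). This reproduces TensorFlow's "SAME"
# padding, which the original MobileNetV1 checkpoints were trained with.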
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
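# Minimal usage sketch (hedged; checkpoint name taken from _CHECKPOINT_FOR_DOC above):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1Model.from_pretrained("google/mobilenet_v1_1.0_224")
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     # outputs.last_hidden_state has shape [1, 1024, 7, 7] (_EXPECTED_OUTPUT_SHAPE)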
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
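# Classification usage follows the same shape (hedged sketch, reusing the
# processor from the sketch above):
#
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"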
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
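    # Note on full_loop: PNDM samples in two phases. The initial `prk_timesteps`
    # use Runge-Kutta steps (step_prk) to bootstrap the multistep history, after
    # which `plms_timesteps` switch to the cheaper linear multistep rule
    # (step_plms) that reuses the stored residuals.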
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() raised an indexing error when the
        # number of inference steps was a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # calculate loss and take a gradient step
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps each lr lambda in a picklable callable object."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
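# Illustrative (function name reconstructed above):
#   mean([3, 6, 9]) == 6.0
#   mean([]) raises ValueError("List is empty")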
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__ on the copy
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
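# Example (requires network access; mirrors the demo below):
#   get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"])
# returns a dict keyed by post index, e.g. {0: {"title": ..., "url": ..., "selftext": ...}}.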
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
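

# A minimal sketch (not part of the test suite) of how these warpers compose in
# practice: chain them in a FlaxLogitsProcessorList and sample from the warped
# logits. The temperature/top-k/top-p values and the PRNG seed are illustrative.
if is_flax_available():

    def _demo_sample_with_warpers(logits, cur_len, seed=0):
        processors = FlaxLogitsProcessorList(
            [
                FlaxTemperatureLogitsWarper(temperature=0.7),
                FlaxTopKLogitsWarper(top_k=50),
                FlaxTopPLogitsWarper(top_p=0.95),
            ]
        )
        warped = processors(None, logits, cur_len=cur_len)  # input_ids unused by these warpers
        return jax.random.categorical(jax.random.PRNGKey(seed), warped, axis=-1)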
| 13 | 0 |
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of decoding method."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
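

# A minimal smoke-test sketch (illustrative only, not part of the original module):
# encode a random image, sample from the resulting diagonal Gaussian posterior, and
# decode it back. The channel counts below are assumptions chosen for the example.
if __name__ == "__main__":
    encoder = Encoder(in_channels=3, out_channels=4, double_z=True)
    decoder = Decoder(in_channels=4, out_channels=3)
    image = torch.randn(1, 3, 32, 32)
    moments = encoder(image)  # (1, 8, 32, 32): mean and logvar stacked along dim 1
    posterior = DiagonalGaussianDistribution(moments)
    reconstruction = decoder(posterior.sample())
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])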
| 37 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    >>> minimum_squares_to_represent_a_number(21)
    3
    >>> minimum_squares_to_represent_a_number(58)
    2
    >>> minimum_squares_to_represent_a_number(0)
    1
    >>> minimum_squares_to_represent_a_number(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be a negative number
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
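

# Note: by Lagrange's four-square theorem the answer above is always at most 4;
# the dynamic-programming approach trades that number-theoretic shortcut for
# simplicity, at O(n * sqrt(n)) time and O(n) space.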
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
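

# Minimal usage sketch outside the test harness (image URL and labels are
# illustrative; the pipeline returns a list of {"score", "label", "box"} dicts):
#
#   detector = pipeline("zero-shot-object-detection")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote"],
#   )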
| 38 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
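
# Note: swapping the module in sys.modules for a _LazyModule defers the heavy
# torch-dependent imports above until an attribute such as `WavLMModel` is first
# accessed, which keeps the initial `import transformers` fast.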
| 13 | 0 |
def min_path_sum(grid: list) -> int:
    """
    Find the path from top-left to bottom-right of an array of numbers with the
    lowest possible sum and return the sum along this path.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    >>> min_path_sum(None)
    Traceback (most recent call last):
        ...
    TypeError: The grid does not contain the appropriate information
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """
    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
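

# Caveat: min_path_sum mutates `grid` in place (each row is overwritten with
# running sums), so pass a copy, e.g. min_path_sum([row[:] for row in grid]),
# if the caller needs to keep the original input.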
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
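

# Minimal usage sketch outside the test harness (the tiny model name is the one
# already used above; do_sample=False keeps the output reproducible):
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)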
| 13 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
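

# Usage note: with the defaults above, LxmertConfig() exposes the per-modality
# depths as config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}.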
| 40 |
def topological_sort(graph):
    """Perform a topological sort (Kahn's algorithm) on a directed acyclic graph."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
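# Expected output for the DAG above: [0, 1, 2, 3, 4, 5]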
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
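
# Launch sketch (file name illustrative): run `python nlp_example.py` for a single
# process, or configure once with `accelerate config` and then use
# `accelerate launch nlp_example.py --mixed_precision fp16` to cover the multi-GPU,
# TPU, and mixed-precision settings listed in the banner above.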
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
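# --- illustrative sketch (not part of the original file) ----------------------
# Self-contained rendering of the optional-dependency pattern repeated above:
# probe a backend and register its symbols only when the probe succeeds.
# All names below are invented for demonstration.
class _MissingBackendDemo(Exception):
    pass

def _register_if(available: bool, key: str, names: list, structure: dict) -> None:
    try:
        if not available:
            raise _MissingBackendDemo()
    except _MissingBackendDemo:
        pass  # backend absent: leave the symbols unregistered
    else:
        structure[key] = names

_demo_structure: dict = {}
_register_if(True, "tokenization_rembert", ["RemBertTokenizer"], _demo_structure)
assert _demo_structure == {"tokenization_rembert": ["RemBertTokenizer"]}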
| 42 |
from collections.abc import Callable
class __lowercase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = lowerCAmelCase__ or (lambda x: x)
    def _parent( self , i : int):
        return int((i - 1) / 2) if i > 0 else None
    def _left( self , i : int):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right( self , i : int):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap( self , i : int , j : int):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self , i : int , j : int):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self , i : int):
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent):
            valid_parent = right
        return valid_parent
    def _heapify_up( self , index : int):
        parent = self._parent(index)
        while parent is not None and not self._cmp(index , parent):
            self._swap(index , parent)
            index , parent = parent, self._parent(parent)
    def _heapify_down( self , index : int):
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index , valid_parent)
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item( self , item : int , item_value : int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item( self , item : int):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item( self , item : int , item_value : int):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top( self ):
        return self.arr[0] if self.size else None
    def extract_top( self ):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def A_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
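# --- illustrative analogue (not part of the original file) --------------------
# The class above keys every item by a user-supplied scoring function; the
# same ordering can be sketched with the standard library by heapifying
# (score, item) pairs.
import heapq

def _keyed_heap_order(items, key=lambda x: x):
    pairs = [(key(item), item) for item in items]
    heapq.heapify(pairs)
    return [heapq.heappop(pairs)[1] for _ in range(len(pairs))]

assert _keyed_heap_order([3, 1, 2], key=lambda x: -x) == [3, 2, 1]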
| 13 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Any = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BICUBIC , __lowercase = True , __lowercase = None , __lowercase = True , __lowercase = 1 / 255 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , **__lowercase , ) -> None:
super().__init__(**__lowercase)
__UpperCamelCase :Optional[Any] = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase :Optional[int] = get_size_dict(__lowercase , default_to_square=__lowercase)
__UpperCamelCase :Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase :Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase , param_name='''crop_size''')
__UpperCamelCase :Optional[int] = do_resize
__UpperCamelCase :Any = size
__UpperCamelCase :List[str] = resample
__UpperCamelCase :Dict = do_center_crop
__UpperCamelCase :Tuple = crop_size
__UpperCamelCase :Optional[Any] = do_rescale
__UpperCamelCase :str = rescale_factor
__UpperCamelCase :Optional[int] = do_normalize
__UpperCamelCase :List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase :List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase :int = do_convert_rgb
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
__UpperCamelCase :Any = get_size_dict(__lowercase , default_to_square=__lowercase)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
__UpperCamelCase :Any = get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase)
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
__UpperCamelCase :List[Any] = get_size_dict(__lowercase)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
__UpperCamelCase :str = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase :Any = size if size is not None else self.size
__UpperCamelCase :int = get_size_dict(__lowercase , param_name='''size''' , default_to_square=__lowercase)
__UpperCamelCase :List[Any] = resample if resample is not None else self.resample
__UpperCamelCase :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase :Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase :Dict = get_size_dict(__lowercase , param_name='''crop_size''' , default_to_square=__lowercase)
__UpperCamelCase :str = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase :int = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase :Optional[int] = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase :int = image_std if image_std is not None else self.image_std
__UpperCamelCase :List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase :List[str] = make_list_of_images(__lowercase)
if not valid_images(__lowercase):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase :Optional[int] = [convert_to_rgb(__lowercase) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase :str = [to_numpy_array(__lowercase) for image in images]
if do_resize:
__UpperCamelCase :Optional[Any] = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase) for image in images]
if do_center_crop:
__UpperCamelCase :Dict = [self.center_crop(image=__lowercase , size=__lowercase) for image in images]
if do_rescale:
__UpperCamelCase :Optional[Any] = [self.rescale(image=__lowercase , scale=__lowercase) for image in images]
if do_normalize:
__UpperCamelCase :List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase) for image in images]
__UpperCamelCase :Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
__UpperCamelCase :List[str] = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase)
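# Usage sketch (assumption: the class above mirrors transformers'
# CLIPImageProcessor; its public name was mangled by the style transform):
# import numpy as np
# image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
# processor = CLIPImageProcessor(size={"shortest_edge": 224})
# batch = processor(images=image, return_tensors="np")
# batch["pixel_values"].shape  # expected: (1, 3, 224, 224)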
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
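# --- illustrative sketch (invented stand-in, not the real _LazyModule) --------
# The registry above maps submodule names to exported symbols; a lazy module
# defers the import until an attribute is first accessed, roughly like this:
import importlib

class _LazyModuleDemo:
    def __init__(self, package: str, structure: dict):
        self._package = package
        self._symbol_to_module = {sym: mod for mod, syms in structure.items() for sym in syms}

    def __getattr__(self, name: str):
        module = importlib.import_module("." + self._symbol_to_module[name], self._package)
        return getattr(module, name)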
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
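    # Worked check: resistance 3 and reactance 4 form a 3-4-5 triple, so the
    # impedance magnitude is 5 (values chosen for illustration).
    assert SCREAMING_SNAKE_CASE(3, 4, 0) == {"impedance": 5.0}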
| 44 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] = ShapEPipeline
_UpperCAmelCase : Tuple = ['''prompt''']
_UpperCAmelCase : Dict = ['''prompt''']
_UpperCAmelCase : Any = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Optional[int] = False
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 8
@property
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE_: Any = PriorTransformer(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_: Optional[int] = ShapERenderer(**lowerCAmelCase__)
return model
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] = self.dummy_renderer
SCREAMING_SNAKE_CASE_: Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_: Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = "cpu"
SCREAMING_SNAKE_CASE_: Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Dict = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE_: Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Dict = torch_device == "cpu"
SCREAMING_SNAKE_CASE_: List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 1
SCREAMING_SNAKE_CASE_: Any = 2
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_inputs(lowerCAmelCase__)
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_: List[Any] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_: Tuple = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
SCREAMING_SNAKE_CASE_: List[str] = ShapEPipeline.from_pretrained("openai/shap-e")
SCREAMING_SNAKE_CASE_: Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
SCREAMING_SNAKE_CASE_: int = pipe(
"a shark" , generator=lowerCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
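        # Note (illustrative): this slow test follows the standard diffusers
        # integration recipe -- fixed RNG seed, a reference array fetched from
        # the Hub, and a mean-pixel-difference check instead of exact equality
        # so that minor GPU nondeterminism does not flip the result.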
| 13 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase_ = logging.get_logger(__name__)
def normalize_box ( box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['pixel_values']
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ):
super().__init__(**_a )
__a = size if size is not None else {'''height''': 224, '''width''': 224}
__a = get_size_dict(_a )
__a = do_resize
__a = size
__a = resample
__a = do_rescale
__a = rescale_value
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
__a = apply_ocr
__a = ocr_lang
__a = tesseract_config
def __UpperCAmelCase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ):
__a = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
__a = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a , _a = None , **_a , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a , _a , _a = None , **_a , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(_a )
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = apply_ocr if apply_ocr is not None else self.apply_ocr
__a = ocr_lang if ocr_lang is not None else self.ocr_lang
__a = tesseract_config if tesseract_config is not None else self.tesseract_config
__a = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__a = []
__a = []
for image in images:
__a , __a = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
__a = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
__a = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
__a = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
__a = [to_channel_dimension_format(_a , _a ) for image in images]
__a = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
__a = words_batch
__a = boxes_batch
return data
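# Usage sketch (assumption: the class above mirrors LayoutLMv2ImageProcessor;
# with apply_ocr=True, pytesseract words and 0-1000 normalized boxes ride
# along with the pixel values):
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(image, return_tensors="pt")
# encoding.pixel_values.shape, encoding.words, encoding.boxes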
| 45 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode ( plain : str ):
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded : list[int] ):
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ):
    '''simple docstring'''
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 46 |
class Node:
    """simple docstring"""
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return F"{self.__class__.__name__}({self.name}, {self.val})"
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap:
    """simple docstring"""
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ):
        return self.get_value(key )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ):
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
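# --- illustrative contrast (not part of the original file) --------------------
# Without the idx_of_element index used by decrease_key above, `heapq` users
# typically emulate priority updates by pushing a fresh entry and skipping
# stale ones on pop ("lazy deletion"):
import heapq

_pq, _best = [], {}

def lazy_push(item, priority):
    _best[item] = priority
    heapq.heappush(_pq, (priority, item))

def lazy_pop():
    while _pq:
        priority, item = heapq.heappop(_pq)
        if _best.get(item) == priority:  # skip entries made stale by an update
            return item, priority
    return None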
| 13 | 0 |
'''simple docstring'''
def apply_table ( inp , table ):
    """simple docstring"""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift ( data ):
    """simple docstring"""
    return data[1:] + data[0]
def xor ( a , b ):
    """simple docstring"""
    res = ''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox ( s , data ):
    """simple docstring"""
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function ( expansion , s0 , s1 , key , message ):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 1_0, 9]
    p10_table = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
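    # Self-check (illustrative): decryption must invert encryption, so the
    # recovered plain text equals the original message.
    assert PT == message, "S-DES round trip failed"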
| 47 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase : Any = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape , vocab_size , rng=None ):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask ( shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = ()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[int] = inputs["input_ids"].shape[-1] // 2
SCREAMING_SNAKE_CASE_: List[str] = inputs["input_ids"][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE_: Any = jnp.ones_like(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE_: Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE_: Optional[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: Dict = max_length
SCREAMING_SNAKE_CASE_: List[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: str = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , flax_model.params)
SCREAMING_SNAKE_CASE_: List[Any] = flax_model.generate(lowerCAmelCase__).sequences
SCREAMING_SNAKE_CASE_: str = pt_model.generate(torch.tensor(lowerCAmelCase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_: List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Dict = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
SCREAMING_SNAKE_CASE_: Optional[int] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: int = max_length
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = max_length
SCREAMING_SNAKE_CASE_: Any = 0.8
SCREAMING_SNAKE_CASE_: Any = 10
SCREAMING_SNAKE_CASE_: List[str] = 0.3
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: int = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: List[str] = 2
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Tuple = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: List[Any] = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[int] = True
SCREAMING_SNAKE_CASE_: Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
SCREAMING_SNAKE_CASE_: Any = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
SCREAMING_SNAKE_CASE_: List[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
SCREAMING_SNAKE_CASE_: Optional[int] = "Hello world"
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="np").input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase__ , "do_samples"):
model.generate(lowerCAmelCase__ , do_samples=lowerCAmelCase__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase__ , "foo"):
SCREAMING_SNAKE_CASE_: str = {"foo": "bar"}
model.generate(lowerCAmelCase__ , **lowerCAmelCase__)
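# Minimal sketch (model and inputs assumed) of the jit-vs-eager equivalence
# the tests above assert for every generative Flax model:
# from jax import jit
# eager = model.generate(input_ids).sequences
# jitted = jit(model.generate)(input_ids).sequences
# assert eager.tolist() == jitted.tolist()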
| 13 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared ( vector ) -> float:
    return np.dot(vector , vector )
class SVC:
    '''simple docstring'''
    def __init__( self , *,
        regularization = np.inf , kernel = "linear" , gamma = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma" )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError("gamma must be float or int" )
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self , vectora , vectorb ) -> float:
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora , vectorb ) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self , observations , classes ) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n ,) = np.shape(classes )
        def to_minimize( candidate ) -> float:
            s = 0
            (n ,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
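# Usage sketch (toy data assumed): two 1-D points on either side of the origin
# are linearly separable, so the fitted SVC should recover the sign.
# svm = SVC(kernel="linear")
# svm.fit(observations=[np.array([1.0]), np.array([-1.0])], classes=[1, -1])
# svm.predict(np.array([2.0]))  # expected: 1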
| 48 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase : Union[str, Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap ( checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict ( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R".*sequential.(\d+).*"
    text_projection_pattern = R".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(sequential_layer )//3}.linear." )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
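# Example invocation of the script above, using only the flags it defines
# (the script filename and all paths are placeholders):
#   python convert_clap_checkpoint.py \
#       --checkpoint_path /path/to/clap_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --config_path ./config.json \
#       --enable_fusion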
| 13 | 0 |
import os
def solution( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
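# Worked example of the accumulation above on a small triangle: each entry
# adds the larger of its two reachable parents, and the answer is the maximum
# of the last row (here the best path is 3 + 7 + 4 + 9 = 23).
small = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(small)):
    for j in range(len(small[i])):
        above = small[i - 1][j] if j != len(small[i - 1]) else 0
        above_left = small[i - 1][j - 1] if j > 0 else 0
        small[i][j] += max(above, above_left)
assert max(small[-1]) == 23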
| 49 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int=37 , lowerCAmelCase__ : Optional[Any]="gelu" , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=10 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=2 , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_: str = image_size
SCREAMING_SNAKE_CASE_: Tuple = patch_size
SCREAMING_SNAKE_CASE_: int = num_channels
SCREAMING_SNAKE_CASE_: List[str] = is_training
SCREAMING_SNAKE_CASE_: str = use_labels
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Any = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Dict = scope
SCREAMING_SNAKE_CASE_: Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_: List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = num_patches + 1
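        # Side note (not from the source): with the defaults above, image_size=30
        # and patch_size=2, the model sees (30 // 2) ** 2 = 225 patches, so
        # seq_length becomes 225 + 1 = 226 once the [CLS] token is counted.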
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Dict = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: List[str] = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Tuple = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = ViTModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_: str = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which allows interpolating the pre-trained position embeddings in order
        # to use the model at higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_: str = ViTModel.from_pretrained("facebook/dino-vits8").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
SCREAMING_SNAKE_CASE_: List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: int = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: str = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
| 13 | 0 |
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 50 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
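# Illustration of the table built by bytes_to_unicode above: printable bytes
# map to themselves, while the remaining byte values are shifted up by 256 so
# that every byte gets a visible, reversible character. For example:
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")] == "A"        # printable bytes are unchanged
#   byte_encoder[ord(" ")] == "\u0120"   # space becomes "Ġ" (chr(256 + 32))
# which is why GPT-2/RoBERTa-style vocabularies show "Ġ" for a leading space.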
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
| 13 | 0 |
def A (arr : list[int] , required_sum : int ) -> bool:
    """simple docstring"""
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero (0) can be formed by not taking any
    # element, hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a non-zero sum cannot be formed from the empty set, hence False
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
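# Example use of the routine above: with arr = [3, 34, 4, 12, 5, 2] a subset
# summing to 9 exists (4 + 5), while no subset sums to 30.
assert A([3, 34, 4, 12, 5, 2], 9) is True
assert A([3, 34, 4, 12, 5, 2], 30) is False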
| 51 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
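# Hedged usage sketch (not part of the tests above): disjunctive constraints
# are what `force_words_ids` with nested lists maps to in `model.generate`,
# where any one of the listed phrasings must appear in the output. Check the
# installed transformers version's docs before relying on the exact signature.
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# force_words_ids = [tokenizer(["rain", "raining"], add_special_tokens=False).input_ids]
# outputs = model.generate(
#     **tokenizer("translate English to German: it is wet", return_tensors="pt"),
#     force_words_ids=force_words_ids,  # one disjunctive constraint: "rain" OR "raining"
#     num_beams=4,
# )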
| 13 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
RESOURCE_FILES_NAMES = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class A__ ( __snake_case ):
_UpperCAmelCase :List[str] = ["input_ids"]
_UpperCAmelCase :Any = VOCAB_FILES_NAMES
_UpperCAmelCase :Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Tuple = RESOURCE_FILES_NAMES
def __init__( self , A_ , A_=None , A_=False , A_="utf8" , A_="[UNK]" , A_="[SEP]" , A_="[PAD]" , A_="[CLS]" , A_="[MASK]" , A_ = None , **A_ , ):
'''simple docstring'''
UpperCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , vocab_file=A_ , encoding=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : List[Any] = sentencepiece_model_ckpt
UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : List[Any] = self.load_vocab(filepath=A_ )
else:
UpperCamelCase : Optional[Any] = {self.sp_model.id_to_piece(A_ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : Union[str, Any] = {v: k for k, v in self.vocab.items()}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if text is None:
return None
UpperCamelCase : Optional[Any] = self.tokenize(A_ )
UpperCamelCase , UpperCamelCase : Any = "", []
for i, ch in enumerate(A_ ):
if ch in self.SP_CHAR_MAPPING:
UpperCamelCase : int = self.SP_CHAR_MAPPING.get(A_ )
else:
UpperCamelCase : int = unicodedata.normalize("NFKC" , A_ )
if self.is_whitespace(A_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(A_ ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = normalized_text, [], 0
if self.do_lower_case:
UpperCamelCase : List[str] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCamelCase : List[Any] = token[1:]
UpperCamelCase : Tuple = text[offset:].index(A_ ) + offset
UpperCamelCase : List[Any] = start + len(A_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCamelCase : Dict = end
return token_mapping
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return len(self.vocab )
def __UpperCamelCase( self ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.__dict__.copy()
UpperCamelCase : Dict = None
return state
def __setstate__( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(A_ , A_ ) for c in text) )
def __UpperCamelCase( self , A_ , A_=False , A_=64 , A_=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
UpperCamelCase : Any = True
if self.sp_model_kwargs.get("alpha" ) is not None:
UpperCamelCase : Union[str, Any] = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
UpperCamelCase : Dict = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
UpperCamelCase : Optional[int] = self.sp_model.EncodeAsPieces(A_ )
else:
UpperCamelCase : List[str] = self.sp_model.SampleEncodeAsPieces(A_ , A_ , A_ )
UpperCamelCase : Tuple = []
for pi, piece in enumerate(A_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(A_ ) and pi != 0:
new_pieces.append(A_ )
continue
else:
continue
UpperCamelCase : str = 0
for i, chunk in enumerate(A_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(A_ ) or self.is_punct(A_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(A_ )
UpperCamelCase : List[str] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Dict = i
if len(A_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = "".join(A_ ).replace(A_ , " " ).strip()
return out_string
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = self.convert_ids_to_tokens(A_ )
UpperCamelCase : Optional[Any] = "".join(A_ ).replace(A_ , " " ).strip()
return out_string
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.vocab.get(A_ , self.vocab.get(self.unk_token ) )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.reverse_vocab.get(A_ , self.unk_token )
def __UpperCamelCase( self , A_ , A_=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
UpperCamelCase : Optional[int] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __UpperCamelCase( self , A_ , A_=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __UpperCamelCase( self , A_ , A_=None , A_=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(A_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(A_ ) + 1) + [1] * (len(A_ ) + 3)
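    # Worked example of the segment-id layout documented above (a side note,
    # not from the source): for a pair the layout is
    #   [CLS] A1 .. An [SEP] [SEP] B1 .. Bm [SEP]
    # so with n = 2 and m = 3 the method returns
    #   [0] * (2 + 1) + [1] * (3 + 3) == [0, 0, 0, 1, 1, 1, 1, 1, 1]
    # i.e. [CLS] plus the first segment is 0 and everything from the first
    # [SEP] onwards is 1.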
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(A_ ) == 1:
UpperCamelCase : Optional[Any] = unicodedata.category(A_ )
if cat == "Zs":
return True
return False
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : str = {}
with io.open(A_ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(A_ ):
UpperCamelCase : List[Any] = line.rstrip("\n" )
UpperCamelCase : Any = int(A_ )
return token_to_idx
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Any = 0
if os.path.isdir(A_ ):
UpperCamelCase : Optional[int] = os.path.join(
A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
UpperCamelCase : List[Any] = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(A_ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCamelCase : Any = token_index
writer.write(token + "\n" )
index += 1
UpperCamelCase : Any = os.path.join(A_ , "sentencepiece.bpe.model" )
with open(A_ , "wb" ) as fi:
UpperCamelCase : int = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (vocab_file,)
| 52 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
SCREAMING_SNAKE_CASE_: Tuple = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
| 13 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits : int , classical_bits : int ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    backend = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , backend , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
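    # With both qubits flipped by the X gates and 1000 shots on a noiseless
    # simulator, the histogram should contain a single entry, e.g. {'11': 1000}.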
| 53 |
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be non-negative" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
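# The function above is the Friedmann equation; in LaTeX notation (a
# restatement, not from the source):
#   H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}
# where \Omega_k = 1 - (\Omega_r + \Omega_m + \Omega_\Lambda) is exactly the
# `curvature` term computed above.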
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = row, column
__SCREAMING_SNAKE_CASE = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : str ) -> str:
__SCREAMING_SNAKE_CASE = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
__SCREAMING_SNAKE_CASE = 0
for row_vector in self.array:
for obj in row_vector:
__SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
__SCREAMING_SNAKE_CASE = F"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
__SCREAMING_SNAKE_CASE = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : Union[str, Any] ) -> str:
return str(self )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Tuple , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
__SCREAMING_SNAKE_CASE = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__SCREAMING_SNAKE_CASE = self[r, c] + another[r, c]
return result
def __neg__( self : List[str] ) -> Matrix:
__SCREAMING_SNAKE_CASE = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__SCREAMING_SNAKE_CASE = -self[r, c]
return result
def __sub__( self : Optional[int] , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Optional[int] , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
__SCREAMING_SNAKE_CASE = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__SCREAMING_SNAKE_CASE = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
__SCREAMING_SNAKE_CASE = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__SCREAMING_SNAKE_CASE = F"""Unsupported type given for another ({type(UpperCAmelCase__ )})"""
raise TypeError(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Matrix:
__SCREAMING_SNAKE_CASE = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__SCREAMING_SNAKE_CASE = self[r, c]
return result
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__SCREAMING_SNAKE_CASE = v.transpose()
__SCREAMING_SNAKE_CASE = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
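    # The method above applies the Sherman-Morrison identity, written here in
    # LaTeX notation as a restatement (not from the source). With `self`
    # playing the role of A^{-1}:
    #   (A + u v^T)^{-1} = A^{-1} - \frac{A^{-1} u v^T A^{-1}}{1 + v^T A^{-1} u}
    # `numerator_factor` is the scalar 1 + v^T A^{-1} u; when it is zero the
    # update is singular and the method returns None.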
# Testing
if __name__ == "__main__":
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Matrix(3 , 3 , 0 )
for i in range(3 ):
__SCREAMING_SNAKE_CASE = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
__SCREAMING_SNAKE_CASE = Matrix(3 , 1 , 0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1, 2, -3
__SCREAMING_SNAKE_CASE = Matrix(3 , 1 , 0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase_ , lowerCAmelCase_ )}""" )
def UpperCAmelCase__ ():
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 54 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : int = """MobileNetV1Config"""
# Base docstring
lowerCAmelCase : List[Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Dict = [1, 1024, 7, 7]
# Image classification docstring
lowerCAmelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Any = """tabby, tabby cat"""
lowerCAmelCase : List[Any] = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: List[str] = {}
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE_: int = model
SCREAMING_SNAKE_CASE_: Dict = "MobilenetV1/Conv2d_0/"
SCREAMING_SNAKE_CASE_: str = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE_: int = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[int] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE_: List[str] = i + 1
SCREAMING_SNAKE_CASE_: Optional[int] = i * 2
SCREAMING_SNAKE_CASE_: Any = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE_: Any = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE_: Any = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: str = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE_: Tuple = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE_: List[str] = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE_: int = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: Optional[int] = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_var
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = "MobilenetV1/Logits/Conv2d_1c_1x1/"
SCREAMING_SNAKE_CASE_: Optional[Any] = model.classifier.weight
SCREAMING_SNAKE_CASE_: Tuple = model.classifier.bias
return tf_to_pt_map
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE_: int = tf.train.list_variables(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE_: Any = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE_: Optional[Any] = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE_: int = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
SCREAMING_SNAKE_CASE_: int = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE_: List[str] = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE_: Any = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase )
tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp" , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp_1" , _UpperCAmelCase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _UpperCAmelCase )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
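# Hedged usage sketch for the loader above (the checkpoint path is hypothetical;
# the public name of this function in transformers is
# `load_tf_weights_in_mobilenet_v1`):
#
#     config = MobileNetV1Config()
#     model = MobileNetV1Model(config)
#     model = load_tf_weights_in_mobilenet_v1(model, config, "/path/to/mobilenet_v1.ckpt")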
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = features.shape[-2:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = conv_layer.stride
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE_: int = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE_: Tuple = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE_: str = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE_: Dict = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE_: str = pad_along_width // 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE_: int = pad_along_height // 2
SCREAMING_SNAKE_CASE_: Tuple = pad_along_height - pad_top
SCREAMING_SNAKE_CASE_: Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , "constant" , 0.0 )
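# Worked example of the TF "SAME" padding computed above: with in_height =
# in_width = 7, stride = 2 and kernel = 3, 7 % 2 != 0, so pad_along =
# max(3 - (7 % 2), 0) = 2, giving (pad_left, pad_right, pad_top, pad_bottom) =
# (1, 1, 1, 1) and an effective 9x9 input before the convolution.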
class __lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool or str] = True , ):
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
SCREAMING_SNAKE_CASE_: int = 0 if config.tf_padding else int((kernel_size - 1) / 2)
SCREAMING_SNAKE_CASE_: Union[str, Any] = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
SCREAMING_SNAKE_CASE_: str = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: str = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_: Any = config.hidden_act
else:
SCREAMING_SNAKE_CASE_: int = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : torch.Tensor):
if self.config.tf_padding:
SCREAMING_SNAKE_CASE_: Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution)
SCREAMING_SNAKE_CASE_: Optional[int] = self.convolution(lowerCAmelCase__)
if self.normalization is not None:
SCREAMING_SNAKE_CASE_: int = self.normalization(lowerCAmelCase__)
if self.activation is not None:
SCREAMING_SNAKE_CASE_: List[Any] = self.activation(lowerCAmelCase__)
return features
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[str] = MobileNetVaConfig
_UpperCAmelCase : List[Any] = load_tf_weights_in_mobilenet_va
_UpperCAmelCase : List[Any] = '''mobilenet_v1'''
_UpperCAmelCase : int = '''pixel_values'''
_UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[nn.Linear, nn.Convad]):
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase : Any = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : List[str] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : bool = True):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = config
SCREAMING_SNAKE_CASE_: Union[str, Any] = 32
SCREAMING_SNAKE_CASE_: Dict = max(int(depth * config.depth_multiplier) , config.min_depth)
SCREAMING_SNAKE_CASE_: Tuple = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
SCREAMING_SNAKE_CASE_: Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE_: str = nn.ModuleList()
for i in range(13):
SCREAMING_SNAKE_CASE_: List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE_: str = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ))
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ))
SCREAMING_SNAKE_CASE_: List[str] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str):
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_: Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
SCREAMING_SNAKE_CASE_: Optional[Any] = self.conv_stem(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
SCREAMING_SNAKE_CASE_: Tuple = layer_module(lowerCAmelCase__)
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Optional[int] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE_: int = torch.flatten(self.pooler(lowerCAmelCase__) , start_dim=1)
else:
SCREAMING_SNAKE_CASE_: List[str] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
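# Hedged usage sketch for the bare model above (checkpoint taken from the
# docstring constants; `image` is assumed to be a PIL image loaded elsewhere):
#
#     from transformers import AutoImageProcessor, MobileNetV1Model
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1Model.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)  # last_hidden_state shape: (1, 1024, 7, 7)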
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : MobileNetVaConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE_: Dict = MobileNetVaModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE_: str = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.Linear(lowerCAmelCase__ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: List[str] = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_: Tuple = self.classifier(self.dropout(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_: int = "single_label_classification"
else:
SCREAMING_SNAKE_CASE_: str = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_: Dict = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
SCREAMING_SNAKE_CASE_: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_: Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_: Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
if not return_dict:
SCREAMING_SNAKE_CASE_: int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
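# Hedged usage sketch for the classification head above (same assumptions as
# the base-model sketch; `image` is a PIL image loaded elsewhere):
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     logits = model(**processor(images=image, return_tensors="pt")).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]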
| 13 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=10 , UpperCamelCase=[8, 16, 32, 64] , UpperCamelCase=[1, 1, 2, 1] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=3 , UpperCamelCase=None , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=[2, 3, 4] , UpperCamelCase=1 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embeddings_size
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
lowerCamelCase_ = len(UpperCamelCase )
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
lowerCamelCase_ = num_groups
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = BitModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BitForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = BitBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BitModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=UpperCamelCase )
for name, module in model.named_modules():
if isinstance(UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase_ = layer_type
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BitModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def __snake_case ( ):
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
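    # To exercise the slow integration test above locally, the usual transformers
    # convention applies (invocation illustrative; adjust the path to your checkout):
    #   RUN_SLOW=1 pytest tests/models/bit/test_modeling_bit.py -k "IntegrationTest"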
@require_torch
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (BitBackbone,) if is_torch_available() else ()
_lowerCamelCase = BitConfig
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = BitModelTester(self )
| 55 |
def A_ ( _UpperCAmelCase , _UpperCAmelCase = False ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = f"Expected string as input, found {type(_UpperCAmelCase )}"
raise ValueError(_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = f"Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}"
raise ValueError(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple = input_str.split("_" )
SCREAMING_SNAKE_CASE_: str = 0 if use_pascal else 1
SCREAMING_SNAKE_CASE_: int = words[start_index:]
SCREAMING_SNAKE_CASE_: List[str] = [word[0].upper() + word[1:] for word in words_to_capitalize]
SCREAMING_SNAKE_CASE_: List[Any] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> list:
'''simple docstring'''
if len(__UpperCAmelCase ) != 2 or len(a[0] ) != 2 or len(__UpperCAmelCase ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
snake_case_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> list:
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__UpperCAmelCase ) )
]
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> list:
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__UpperCAmelCase ) )
]
def __magic_name__ ( __UpperCAmelCase ) -> tuple[list, list, list, list]:
'''simple docstring'''
if len(__UpperCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
snake_case_ = len(__UpperCAmelCase )
snake_case_ = matrix_length // 2
snake_case_ = [[a[i][j] for j in range(__UpperCAmelCase, __UpperCAmelCase )] for i in range(__UpperCAmelCase )]
snake_case_ = [
[a[i][j] for j in range(__UpperCAmelCase, __UpperCAmelCase )] for i in range(__UpperCAmelCase, __UpperCAmelCase )
]
snake_case_ = [[a[i][j] for j in range(__UpperCAmelCase )] for i in range(__UpperCAmelCase )]
snake_case_ = [[a[i][j] for j in range(__UpperCAmelCase )] for i in range(__UpperCAmelCase, __UpperCAmelCase )]
return top_left, top_right, bot_left, bot_right
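# Quadrant split illustration for a 4x4 input (returned in the order
# top_left, top_right, bot_left, bot_right):
#   [[ 1,  2,  3,  4],        top_left  = [[1, 2], [5, 6]]
#    [ 5,  6,  7,  8],   ->   top_right = [[3, 4], [7, 8]]
#    [ 9, 10, 11, 12],        bot_left  = [[9, 10], [13, 14]]
#    [13, 14, 15, 16]]        bot_right = [[11, 12], [15, 16]]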
def __magic_name__ ( __UpperCAmelCase ) -> tuple[int, int]:
'''simple docstring'''
return len(__UpperCAmelCase ), len(matrix[0] )
def __magic_name__ ( __UpperCAmelCase ) -> None:
'''simple docstring'''
    print('''\n'''.join(str(line ) for line in matrix ) )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> list:
'''simple docstring'''
if matrix_dimensions(__UpperCAmelCase ) == (2, 2):
return default_matrix_multiplication(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = split_matrix(__UpperCAmelCase )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = split_matrix(__UpperCAmelCase )
snake_case_ = actual_strassen(__UpperCAmelCase, matrix_subtraction(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_addition(__UpperCAmelCase, __UpperCAmelCase ), __UpperCAmelCase )
snake_case_ = actual_strassen(matrix_addition(__UpperCAmelCase, __UpperCAmelCase ), __UpperCAmelCase )
snake_case_ = actual_strassen(__UpperCAmelCase, matrix_subtraction(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_addition(__UpperCAmelCase, __UpperCAmelCase ), matrix_addition(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_subtraction(__UpperCAmelCase, __UpperCAmelCase ), matrix_addition(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = actual_strassen(matrix_subtraction(__UpperCAmelCase, __UpperCAmelCase ), matrix_addition(__UpperCAmelCase, __UpperCAmelCase ) )
snake_case_ = matrix_addition(matrix_subtraction(matrix_addition(__UpperCAmelCase, __UpperCAmelCase ), __UpperCAmelCase ), __UpperCAmelCase )
snake_case_ = matrix_addition(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = matrix_addition(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = matrix_subtraction(matrix_subtraction(matrix_addition(__UpperCAmelCase, __UpperCAmelCase ), __UpperCAmelCase ), __UpperCAmelCase )
# construct the new matrix from our 4 quadrants
snake_case_ = []
for i in range(len(__UpperCAmelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__UpperCAmelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
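# For reference, the textbook Strassen identities realized by the seven
# recursive calls above (the obfuscated variable names prevent a one-to-one
# mapping, so quadrant labels follow the standard convention):
#   M1 = (A11 + A22)(B11 + B22)     M5 = (A11 + A12) B22
#   M2 = (A21 + A22) B11            M6 = (A21 - A11)(B11 + B12)
#   M3 = A11 (B12 - B22)            M7 = (A12 - A22)(B21 + B22)
#   M4 = A22 (B21 - B11)
#   C11 = M1 + M4 - M5 + M7         C12 = M3 + M5
#   C21 = M2 + M4                   C22 = M1 - M2 + M3 + M6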
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> list:
'''simple docstring'''
if matrix_dimensions(__UpperCAmelCase )[1] != matrix_dimensions(__UpperCAmelCase )[0]:
snake_case_ = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"Matrix A: {matrixa}\n"
F"Matrix B: {matrixa}"
)
raise Exception(__UpperCAmelCase )
snake_case_ = matrix_dimensions(__UpperCAmelCase )
snake_case_ = matrix_dimensions(__UpperCAmelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case_ = max(*__UpperCAmelCase, *__UpperCAmelCase )
snake_case_ = int(math.pow(2, math.ceil(math.loga(__UpperCAmelCase ) ) ) )
snake_case_ = matrixa
snake_case_ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0, __UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1], __UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1], __UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case_ = actual_strassen(__UpperCAmelCase, __UpperCAmelCase )
# Removing the additional zeros
for i in range(0, __UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1], __UpperCAmelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a : List[Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 56 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def A_ ( _UpperCAmelCase , _UpperCAmelCase=10 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = []
for _ in range(_UpperCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def A_ ( _UpperCAmelCase , _UpperCAmelCase=10 ):
SCREAMING_SNAKE_CASE_: List[str] = []
for step in range(_UpperCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(_UpperCAmelCase , "schedule.bin" )
torch.save(scheduler.state_dict() , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.load(_UpperCAmelCase )
scheduler.load_state_dict(_UpperCAmelCase )
return lrs
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple):
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__))
for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.4, 0.2, -0.5])
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE_: int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0)
for _ in range(100):
SCREAMING_SNAKE_CASE_: Dict = criterion(lowerCAmelCase__ , lowerCAmelCase__)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.tensor([0.4, 0.2, -0.5])
SCREAMING_SNAKE_CASE_: Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE_: int = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase__ , weight_decay=0.0 , relative_step=lowerCAmelCase__ , scale_parameter=lowerCAmelCase__ , warmup_init=lowerCAmelCase__ , )
for _ in range(1000):
SCREAMING_SNAKE_CASE_: List[Any] = criterion(lowerCAmelCase__ , lowerCAmelCase__)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
_UpperCAmelCase : List[Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
_UpperCAmelCase : Optional[Any] = 10
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]=None):
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__))
for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ , msg=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
SCREAMING_SNAKE_CASE_: Dict = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = data
SCREAMING_SNAKE_CASE_: List[Any] = scheduler_func(self.optimizer , **lowerCAmelCase__)
self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
SCREAMING_SNAKE_CASE_: int = unwrap_schedule(lowerCAmelCase__ , self.num_steps)
self.assertListAlmostEqual(
lowerCAmelCase__ , lowerCAmelCase__ , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
SCREAMING_SNAKE_CASE_: List[str] = scheduler_func(self.optimizer , **lowerCAmelCase__)
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase__) # wrap to test picklability of the schedule
SCREAMING_SNAKE_CASE_: Tuple = unwrap_and_save_reload_schedule(lowerCAmelCase__ , self.num_steps)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ , msg=F"failed for {scheduler_func} in save and reload")
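# Hedged usage sketch of the warmup schedulers exercised above (kwargs mirror
# `common_kwargs`; the model and training loop are assumed):
#
#     optimizer = AdamW(model.parameters(), lr=10.0)
#     scheduler = get_linear_schedule_with_warmup(
#         optimizer, num_warmup_steps=2, num_training_steps=10
#     )
#     for _ in range(10):
#         ...  # forward / backward
#         optimizer.step()
#         scheduler.step()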
class __lowercase :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: List[Any] = fn
def __call__( self : Optional[int] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Tuple):
return self.fn(*lowerCAmelCase__ , **lowerCAmelCase__)
@classmethod
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: str = list(map(self , scheduler.lr_lambdas))
| 13 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A : Dict = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
__lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__a , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def snake_case ( self ):
__lowerCAmelCase = "src/transformers"
shutil.rmtree(self.transformer_dir )
def snake_case ( self , __a , __a , __a , __a=None ):
__lowerCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
__lowerCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
__lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
__lowerCAmelCase = black.format_str(__a , mode=__a )
__lowerCAmelCase = os.path.join(self.transformer_dir , "new_code.py" )
with open(__a , "w" , newline="\n" ) as f:
f.write(__a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__a )
with open(__a , "r" ) as f:
self.assertTrue(f.read() , __a )
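    # Illustrative "# Copied from" marker of the kind the helper above validates
    # (class and replacement names are examples only):
    #   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel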
def snake_case ( self ):
__lowerCAmelCase = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__a , __a )
def snake_case ( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __a , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __a ) , )
# Copy consistency with a really long name
__lowerCAmelCase = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("Bert" , __a , __a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __a , overwrite_result=re.sub("Bert" , "TestModel" , __a ) , )
def snake_case ( self ):
__lowerCAmelCase = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
self.assertFalse(__a )
self.assertEqual(__a , __a )
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
        # Check that the converted list keeps the same number of models as README.md.
self.assertTrue(__a )
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__a , __a )
| 57 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_UpperCAmelCase : ClassVar[Features] = Features({'''audio''': Audio()} )
_UpperCAmelCase : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column] , lowerCAmelCase__):
raise ValueError(F"Column {self.audio_column} is not an Audio type.")
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self)
SCREAMING_SNAKE_CASE_: Optional[int] = self.input_schema.copy()
SCREAMING_SNAKE_CASE_: Dict = features[self.audio_column]
SCREAMING_SNAKE_CASE_: int = input_schema
return task_template
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
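# Hedged usage sketch (the dataset name is hypothetical; `prepare_for_task`
# casts the dataset columns according to the template above):
#
#     from datasets import load_dataset
#     ds = load_dataset("some_org/some_asr_dataset", split="train")
#     ds = ds.prepare_for_task("automatic-speech-recognition")
#     ds[0]  # {"audio": ..., "transcription": ...}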
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( __lowerCamelCase : int = 4 ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = abs(__lowerCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__lowerCamelCase )] for y in range(__lowerCamelCase )]
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
return reverse_row(transpose(__lowerCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
return reverse_row(reverse_column(__lowerCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
return reverse_column(transpose(__lowerCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = [list(__lowerCamelCase ) for x in zip(*__lowerCamelCase )]
return matrix
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = matrix[::-1]
return matrix
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = [x[::-1] for x in matrix]
return matrix
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->None:
for i in matrix:
print(*__lowerCamelCase )
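# Worked 2x2 example of the 90-degree counterclockwise composition above:
#   m = [[1, 2], [3, 4]]
#   transpose(m)              -> [[1, 3], [2, 4]]
#   reverse_row(transpose(m)) -> [[2, 4], [1, 3]]   (the 90-degree CCW rotation)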
if __name__ == "__main__":
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
print_matrix(rotate_aa(matrix))
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
print_matrix(rotate_aaa(matrix))
lowercase_ = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
print_matrix(rotate_aaa(matrix))
| 58 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = 20
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1)
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
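    # Temperature scaling is softmax(logits / T): T = 0.5 sharpens the
    # distribution (peaks rise, valleys fall) while T = 1.3 flattens it, which
    # is exactly what the four assertions above verify.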
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
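    # Tiny nucleus illustration mirroring the values above: with probabilities
    # [0.3, 0.1, 0.1, 0.5] and top_p=0.8, the sorted cumulative sum reaches
    # 0.5 + 0.3 = 0.8 >= top_p after two tokens, so only the two largest survive
    # and the rest are set to -inf (probability 0 after exp).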
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
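
# --- Illustrative sketch (added; not part of the original test file) ---
# A standalone view of what the top-p warper exercised above does: tokens outside the
# smallest set whose cumulative probability reaches `top_p` are pushed to -inf. Assumes
# `FlaxTopPLogitsWarper` is importable from `transformers`, as in the tests; the
# probabilities below are made up.
import numpy as np
from transformers import FlaxTopPLogitsWarper

logits = np.log(np.array([[0.5, 0.3, 0.1, 0.1]]))  # log-probabilities for one sequence
warper = FlaxTopPLogitsWarper(top_p=0.7)
filtered = np.exp(warper(None, logits, cur_len=None))  # exp(-inf) -> 0
# only the two most likely tokens survive: [[0.5, 0.3, 0.0, 0.0]]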
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
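
# --- Illustrative usage sketch (added) ---
# A minimal instantiation of the config above with arbitrary values; assumes the class
# is exposed as `transformers.DistilBertConfig`.
from transformers import DistilBertConfig

config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)
print(config.num_hidden_layers)  # attribute_map routes this to `n_layers`, so prints 3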
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    >>> minimum_squares_to_represent_a_number(21)
    3
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    # answers[i] holds the minimum number of squares summing to i, filled bottom-up
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
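
# --- Worked example (added as a comment) ---
# minimum_squares_to_represent_a_number(12) fills the DP table up to 12 and picks
#   answers[12] = min(1 + answers[11], 1 + answers[8], 1 + answers[3])
#               = min(1 + 3,           1 + 2,          1 + 3) = 3
# i.e. the best decomposition is 12 = 2**2 + 2**2 + 2**2.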
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ : Optional[int] = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
snake_case__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
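
# --- Illustrative note (added) ---
# With the `_LazyModule` registration above, `sys.modules[__name__]` becomes a proxy, and
# each submodule listed in `_import_structure` is imported only on first attribute access.
# For example, pulling a config name resolves just `configuration_altclip`:
from transformers.models.altclip import AltCLIPConfig  # lazy import happens here

config = AltCLIPConfig()  # default configuration; no model weights are touched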
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
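
# --- Illustrative usage sketch (added) ---
# Running one of the models registered above on dummy audio. "microsoft/wavlm-base" is
# assumed to be a valid Hub checkpoint, and torch must be installed for the lazy import
# to expose `WavLMModel`.
import torch
from transformers import WavLMModel

model = WavLMModel.from_pretrained("microsoft/wavlm-base")
waveform = torch.randn(1, 16000)  # one second of fake 16 kHz mono audio
with torch.no_grad():
    outputs = model(waveform)
print(outputs.last_hidden_state.shape)  # (1, num_frames, hidden_size)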