from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
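# Illustrative sketch of the lazy-module pattern above: importing the package
# itself is cheap, and the heavy torch/TF modules are only imported on first
# attribute access (assumes the torch backend is installed):
#
#     from transformers.models.groupvit import GroupViTConfig  # config only, fast
#     from transformers.models.groupvit import GroupViTModel   # first access pulls in torch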
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
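    # For illustration with the defaults above: image_seq_length = (4 // 2) ** 2 + 1 = 5
    # (four image patches plus the CLS token), so the model-level sequence length is
    # text_seq_length + image_seq_length = 7 + 5 = 12.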
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
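# Illustrative usage through the `accelerate` CLI entry point (subcommand names
# follow the parsers registered above):
#
#   accelerate config           # interactive questionnaire, writes the config file
#   accelerate config default   # write a default config without prompts
#   accelerate config update    # rewrite an existing config file with current defaults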
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train transforms to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
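    # For illustration: with a per-device batch size of 64, gradient accumulation
    # of 2 and a world size of 4, total_train_batch_size = 512, so the default
    # base rate of 1e-3 scales to an absolute learning rate of 1e-3 * 512 / 256 = 2e-3.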
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
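# Illustrative launch command (paths and hyperparameters are placeholders; the
# flag names come from the dataclasses above and from TrainingArguments):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss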
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Highest Response Ratio Next (HRRN): compute turn-around time per process."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump the clock forward if nothing has arrived yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Waiting time is turn-around time minus the burst (service) time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process)
    waiting_time = calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process)

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
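# Worked example of the HRRN selection rule (for illustration): a process that
# has waited 6 time units and needs a 3-unit burst scores (6 + 3) / 3 = 3.0,
# while a freshly arrived 3-unit process scores (0 + 3) / 3 = 1.0, so the
# longer-waiting process is dispatched first.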
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` expressible as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
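# For reference, solution() here agrees with the published Project Euler
# problem 50 result: 997651, the sum of the 543 consecutive primes starting at 7.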
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given two of (conductivity, electron_conc, mobility), solve for the third via sigma = q * n * mu."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
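# Worked example (for illustration): with conductivity = 0, electron_conc = 1e20 m^-3
# and mobility = 0.01 m^2/(V*s), the function solves for conductivity as
# 0.01 * 1e20 * 1.6021e-19 ≈ 0.16021 S/m.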
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
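# Worked example (for illustration): 25 = 0b11001 has three set bits.
# Kernighan's trick clears the lowest set bit once per iteration:
#   0b11001 & 0b11000 = 0b11000
#   0b11000 & 0b10111 = 0b10000
#   0b10000 & 0b01111 = 0b00000  -> three iterations, so the count is 3.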
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
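# The second value computed above (characters / tokens) is a rough compression
# ratio: higher values mean the tokenizer packs more source text into each token.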
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 1 |
import os
# Precomputes a list of the first 100 triangular numbers
UpperCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> Optional[Any]:
UpperCamelCase__ :Optional[int] = os.path.dirname(os.path.realpath(lowercase__ ) )
UpperCamelCase__ :Optional[Any] = os.path.join(lowercase__ , """words.txt""" )
UpperCamelCase__ :Optional[Any] = """"""
with open(lowercase__ ) as f:
UpperCamelCase__ :int = f.readline()
UpperCamelCase__ :Optional[int] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
UpperCamelCase__ :Union[str, Any] = [
word
for word in [sum(ord(lowercase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
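# Worked example (Project Euler 42): "SKY" -> 19 + 11 + 25 = 55 = t(10), where
# t(n) = n * (n + 1) / 2, so "SKY" counts as a triangular word.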
if __name__ == "__main__":
print(solution())
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
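# Complexity note (added for clarity): stooge sort satisfies the recurrence
# T(n) = 3 * T(2 * n / 3) + O(1), which solves to O(n ** (log 3 / log 1.5)),
# roughly O(n ** 2.71), making it slower than even bubble sort.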
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert the set of op names to a sorted list
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
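# Example invocation (the model path is an assumption, shown for illustration only):
# python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict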
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
UpperCamelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
UpperCamelCase = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A ( ) -> str:
UpperCamelCase__ :Optional[int] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCamelCase__ :List[str] = bs[:]
UpperCamelCase__ :List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase__ )
cs.append(2**8 + n )
n += 1
UpperCamelCase__ :List[Any] = [chr(lowercase__ ) for n in cs]
return dict(zip(lowercase__ , lowercase__ ) )
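# The mapping built above is reversible: each of the 256 byte values gets a printable
# unicode character (printable bytes map to themselves, the rest are shifted above
# 255), so BPE can operate on arbitrary UTF-8 without whitespace/control characters.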
def A ( lowercase__ : Dict ) -> List[str]:
UpperCamelCase__ :List[str] = set()
UpperCamelCase__ :Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase__ :List[Any] = char
return pairs
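# e.g. get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}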
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : Any = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ["""input_ids""", """attention_mask"""]
def __init__( self :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :int="replace" , lowerCamelCase__ :Tuple="<s>" , lowerCamelCase__ :Dict="</s>" , lowerCamelCase__ :List[Any]="</s>" , lowerCamelCase__ :int="<s>" , lowerCamelCase__ :List[Any]="<unk>" , lowerCamelCase__ :Any="<pad>" , lowerCamelCase__ :int="<mask>" , lowerCamelCase__ :List[Any]=False , **lowerCamelCase__ :Dict , ):
UpperCamelCase__ :str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
UpperCamelCase__ :Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
UpperCamelCase__ :Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
UpperCamelCase__ :Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
UpperCamelCase__ :Union[str, Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ :List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase__ :List[str] = json.load(lowerCamelCase__ )
UpperCamelCase__ :Any = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ :List[Any] = errors # how to handle errors in decoding
UpperCamelCase__ :Union[str, Any] = bytes_to_unicode()
UpperCamelCase__ :int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase__ :Dict = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase__ :Dict = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase__ :int = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
UpperCamelCase__ :Dict = {}
UpperCamelCase__ :Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ :str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __a ( self :Dict ):
return len(self.encoder )
def __a ( self :Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self :List[str] , lowerCamelCase__ :Optional[Any] ):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ :Tuple = tuple(lowerCamelCase__ )
UpperCamelCase__ :Any = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
UpperCamelCase__ :Optional[int] = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase__ , UpperCamelCase__ :Dict = bigram
UpperCamelCase__ :Dict = []
UpperCamelCase__ :Any = 0
while i < len(lowerCamelCase__ ):
try:
UpperCamelCase__ :Union[str, Any] = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase__ :Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase__ :List[Any] = tuple(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
UpperCamelCase__ :Union[str, Any] = get_pairs(lowerCamelCase__ )
UpperCamelCase__ :Any = """ """.join(lowerCamelCase__ )
UpperCamelCase__ :Dict = word
return word
def __a ( self :int , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Dict = []
for token in re.findall(self.pat , lowerCamelCase__ ):
UpperCamelCase__ :Tuple = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(""" """ ) )
return bpe_tokens
def __a ( self :Union[str, Any] , lowerCamelCase__ :str ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def __a ( self :Optional[Any] , lowerCamelCase__ :Dict ):
return self.decoder.get(lowerCamelCase__ )
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :str = """""".join(lowerCamelCase__ )
UpperCamelCase__ :Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase__ :Tuple = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ :Optional[Any] = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + """\n""" )
UpperCamelCase__ :str = 0
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase__ :Optional[int] = token_index
writer.write(""" """.join(lowerCamelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __a ( self :Optional[int] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None , lowerCamelCase__ :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :Optional[Any] = [self.sep_token_id]
UpperCamelCase__ :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple=False , **lowerCamelCase__ :Any ):
UpperCamelCase__ :List[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
UpperCamelCase__ :List[str] = """ """ + text
return (text, kwargs)
def __a ( self :Optional[int] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def __a ( self :Tuple , lowerCamelCase__ :"Conversation" ):
UpperCamelCase__ :List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done inside Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should already contain the space prefix.
inputs.append(lowerCamelCase__ )
UpperCamelCase__ :Dict = """ """.join(lowerCamelCase__ )
UpperCamelCase__ :List[str] = self.encode(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.model_max_length:
UpperCamelCase__ :Dict = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the English language (how often they appear)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
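# Chi-squared goodness of fit used above: chi2 = sum((observed - expected)**2 / expected);
# the shift whose decryption best matches English letter frequencies minimizes it.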
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
UpperCamelCase__ , UpperCamelCase__ :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 45 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCamelCase = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def A ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : int ) -> Tuple:
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(lowercase__ ) , version.parse(lowercase__ ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def A ( lowercase__ : str , lowercase__ : Optional[str] = None ) -> None:
UpperCamelCase__ :Union[str, Any] = f"""\n{hint}""" if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""" , lowercase__ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = requirement, None, None
else:
UpperCamelCase__ :Tuple = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , lowercase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f""" got {requirement}""" )
UpperCamelCase__ , UpperCamelCase__ :Tuple = match[0]
UpperCamelCase__ :Optional[Any] = want_full.split(""",""" ) # there could be multiple requirements
UpperCamelCase__ :List[str] = {}
for w in want_range:
UpperCamelCase__ :Dict = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , lowercase__ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f""" but got {requirement}""" )
UpperCamelCase__ , UpperCamelCase__ :List[str] = match[0]
UpperCamelCase__ :Tuple = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
UpperCamelCase__ :int = """.""".join([str(lowercase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
UpperCamelCase__ :int = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
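# Typical calls (illustrative examples, inferred from the parsing logic above):
# require_version("numpy>=1.17,<2.0")  # comma-separated range, each op checked in turn
# require_version("python>=3.8")       # "python" is compared against sys.version_info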
def A ( lowercase__ : Optional[int] ) -> List[str]:
UpperCamelCase__ :Tuple = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(lowercase__ , lowercase__ )
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
UpperCamelCase = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def A ( lowercase__ : str , lowercase__ : str , lowercase__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCamelCase__ :Any = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(lowercase__ )}"""
)
raise ValueError(lowercase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
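# Worked example from the table above: 1 joule = 0.001 kilojoule, so converting
# ("joule", "kilojoule", 1) returns 1 * 1.0 / 1_000 = 0.001.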
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (
a[left],
a[pivot],
) # switches the pivot with the leftmost bound
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
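# Choosing the pivot uniformly at random gives an expected O(n log n) running
# time on every input, avoiding the O(n ** 2) worst case that a fixed pivot
# hits on already-sorted data.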
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
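# The loop above is value guidance, analogous to classifier guidance: each
# denoising step nudges the sample by scale * std * d(value)/dx toward higher
# predicted return, then re-clamps the conditioned initial state.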
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 45 | 1 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
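# e.g. 121 reversed is 121 -> True; 123 reversed is 321 -> False;
# negative numbers return False immediately.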
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import importlib
import os
import sys
# This is required to make the module imports work (when the python process is running from the root of the repo)
sys.path.append(".")
def A ( lowercase__ : List[str] ) -> Tuple:
UpperCamelCase__ :Dict = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"""{test_file} instead.""" )
UpperCamelCase__ :Optional[int] = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
UpperCamelCase__ :Union[str, Any] = components[:-1] + [test_fn.replace(""".py""" , """""" )]
UpperCamelCase__ :List[Any] = """.""".join(lowercase__ )
return test_module_path
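# e.g. "tests/models/bert/test_modeling_bert.py" -> "tests.models.bert.test_modeling_bert"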
def A ( lowercase__ : int ) -> Tuple:
UpperCamelCase__ :Optional[Any] = get_module_path(lowercase__ )
UpperCamelCase__ :List[str] = importlib.import_module(lowercase__ )
return test_module
def A ( lowercase__ : Union[str, Any] ) -> int:
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :str = get_test_module(lowercase__ )
for attr in dir(lowercase__ ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(lowercase__ , lowercase__ ) )
# sort with class names
return sorted(lowercase__ , key=lambda lowercase__ : x.__name__ )
def A ( lowercase__ : Union[str, Any] ) -> Optional[Any]:
UpperCamelCase__ :int = []
UpperCamelCase__ :Union[str, Any] = get_test_module(lowercase__ )
for attr in dir(lowercase__ ):
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , lowercase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCamelCase__ :Union[str, Any] = getattr(lowercase__ , """all_model_classes""" , [] )
if len(lowercase__ ) > 0:
test_classes.append(lowercase__ )
# sort with class names
return sorted(lowercase__ , key=lambda lowercase__ : x.__name__ )
def A ( lowercase__ : Dict ) -> Union[str, Any]:
UpperCamelCase__ :List[str] = get_test_classes(lowercase__ )
UpperCamelCase__ :Optional[Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowercase__ , key=lambda lowercase__ : x.__name__ )
def A ( lowercase__ : List[str] ) -> List[str]:
UpperCamelCase__ :Any = test_class()
if hasattr(lowercase__ , """setUp""" ):
test.setUp()
UpperCamelCase__ :str = None
if hasattr(lowercase__ , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCamelCase__ :str = test.model_tester.__class__
return model_tester
def A ( lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]:
UpperCamelCase__ :int = get_test_classes(lowercase__ )
UpperCamelCase__ :str = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase__ )
# sort with class names
return sorted(lowercase__ , key=lambda lowercase__ : x.__name__ )
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] ) -> List[Any]:
UpperCamelCase__ :Optional[Any] = get_test_classes_for_model(lowercase__ , lowercase__ )
UpperCamelCase__ :Optional[Any] = []
for test_class in test_classes:
UpperCamelCase__ :Optional[int] = get_model_tester_from_test_class(lowercase__ )
if tester_class is not None:
tester_classes.append(lowercase__ )
# sort with class names
return sorted(lowercase__ , key=lambda lowercase__ : x.__name__ )
def A ( lowercase__ : Tuple ) -> Optional[Any]:
UpperCamelCase__ :Tuple = get_test_classes(lowercase__ )
UpperCamelCase__ :Any = {test_class: get_model_tester_from_test_class(lowercase__ ) for test_class in test_classes}
return test_tester_mapping
def A ( lowercase__ : int ) -> Tuple:
UpperCamelCase__ :Tuple = get_model_classes(lowercase__ )
UpperCamelCase__ :Dict = {
model_class: get_test_classes_for_model(lowercase__ , lowercase__ ) for model_class in model_classes
}
return model_test_mapping
def A ( lowercase__ : str ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = get_model_classes(lowercase__ )
UpperCamelCase__ :Tuple = {
model_class: get_tester_classes_for_model(lowercase__ , lowercase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def A ( lowercase__ : Optional[Any] ) -> List[str]:
if isinstance(lowercase__ , lowercase__ ):
return o
elif isinstance(lowercase__ , lowercase__ ):
return o.__name__
elif isinstance(lowercase__ , (list, tuple) ):
return [to_json(lowercase__ ) for x in o]
elif isinstance(lowercase__ , lowercase__ ):
return {to_json(lowercase__ ): to_json(lowercase__ ) for k, v in o.items()}
else:
return o
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
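# e.g. [1, 2, 3] -> True; [1, 2, 2] -> False (the duplicate collapses in the set).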
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import os
from distutils.util import strtobool
def A ( lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> List[str]:
for e in env_keys:
UpperCamelCase__ :Optional[Any] = int(os.environ.get(lowercase__ , -1 ) )
if val >= 0:
return val
return default
def A ( lowercase__ : Optional[int] , lowercase__ : int=False ) -> Optional[int]:
UpperCamelCase__ :Optional[Any] = os.environ.get(lowercase__ , str(lowercase__ ) )
return strtobool(lowercase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def A ( lowercase__ : Tuple , lowercase__ : Dict="no" ) -> List[Any]:
UpperCamelCase__ :Optional[int] = os.environ.get(lowercase__ , str(lowercase__ ) )
return value
| 45 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
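# XOR is an involution: (ch ^ key) ^ key == ch, which is why the encrypt and
# decrypt methods above share the same implementation.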
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = (KDPMaDiscreteScheduler,)
_snake_case : List[Any] = 10
def __a ( self :List[str] , **lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :int = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCamelCase__ )
return config
def __a ( self :Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __a ( self :List[str] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def __a ( self :Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __a ( self :List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __a ( self :int ):
UpperCamelCase__ :Optional[int] = self.scheduler_classes[0]
UpperCamelCase__ :Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase__ :int = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ :Optional[Any] = self.dummy_model()
UpperCamelCase__ :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ :Dict = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ :Dict = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = output.prev_sample
UpperCamelCase__ :Dict = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def __a ( self :Optional[int] ):
if torch_device == "mps":
return
UpperCamelCase__ :Optional[int] = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ :List[Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ :Any = self.dummy_model()
UpperCamelCase__ :Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ :Tuple = sample.to(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ :List[Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = output.prev_sample
UpperCamelCase__ :Tuple = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :Any = torch.mean(torch.abs(lowerCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def __a ( self :str ):
if torch_device == "mps":
return
UpperCamelCase__ :str = self.scheduler_classes[0]
UpperCamelCase__ :int = self.get_scheduler_config()
UpperCamelCase__ :List[str] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = self.dummy_model()
UpperCamelCase__ :Optional[int] = self.dummy_sample_deter.to(lowerCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCamelCase__ :Union[str, Any] = scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = output.prev_sample
UpperCamelCase__ :str = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
if str(lowerCamelCase__ ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 45 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (
a[left],
a[pivot],
) # switches the pivot with the leftmost bound
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
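# A quick property check for the randomized quicksort above (an illustrative
# addition, not part of the original script): on random inputs the result must
# agree with Python's built-in sorted().
def _sanity_check(num_trials: int = 100) -> None:
    for _ in range(num_trials):
        sample = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        expected = sorted(sample)
        quick_sort_random(sample, 0, len(sample))
        assert sample == expected, f"quick_sort_random failed on {sample}"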
| 45 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCamelCase = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
UpperCamelCase = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
UpperCamelCase = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __a ( self :Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def __a ( self :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict=4 , lowerCamelCase__ :List[Any]=False ):
        score = compute_bleu(
            reference_corpus=lowerCamelCase__ , translation_corpus=lowerCamelCase__ , max_order=lowerCamelCase__ , smooth=lowerCamelCase__ )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
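# Illustrative sketch (not part of the metric file): the fields returned above
# combine into the corpus BLEU score as BP times the geometric mean of the
# n-gram precisions, where BP = 1 if c > r and exp(1 - r/c) otherwise
# (c = translation length, r = reference length).
import math


def bleu_from_parts(precisions, translation_length, reference_length):
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    bp = 1.0 if translation_length > reference_length else math.exp(1 - reference_length / translation_length)
    return bp * geo_mean


# Perfect 1- to 4-gram precisions with equal lengths give BLEU = 1.0.
assert bleu_from_parts([1.0, 1.0, 1.0, 1.0], 4, 4) == 1.0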
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
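# Illustrative sketch of the hidden_size derivation above, assuming the default
# embed_dim=64 and four stages: the channel count doubles at each downsampling
# step, so the last stage ends at embed_dim * 2 ** (num_stages - 1).
embed_dim, num_stages = 64, 4
assert int(embed_dim * 2 ** (num_stages - 1)) == 512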
| 45 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any]=3 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :str=3 , lowerCamelCase__ :str=10 , lowerCamelCase__ :Dict=[8, 16, 32, 64] , lowerCamelCase__ :Optional[int]=[1, 1, 2, 1] , lowerCamelCase__ :Tuple=True , lowerCamelCase__ :List[str]=True , lowerCamelCase__ :List[str]="relu" , lowerCamelCase__ :Tuple=3 , lowerCamelCase__ :Optional[int]=None , lowerCamelCase__ :List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase__ :Any=[2, 3, 4] , lowerCamelCase__ :Dict=1 , ):
UpperCamelCase__ :List[str] = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :str = image_size
UpperCamelCase__ :str = num_channels
UpperCamelCase__ :Dict = embeddings_size
UpperCamelCase__ :List[str] = hidden_sizes
UpperCamelCase__ :str = depths
UpperCamelCase__ :int = is_training
UpperCamelCase__ :List[Any] = use_labels
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :str = num_labels
UpperCamelCase__ :Optional[Any] = scope
UpperCamelCase__ :str = len(lowerCamelCase__ )
UpperCamelCase__ :List[str] = out_features
UpperCamelCase__ :Optional[Any] = out_indices
UpperCamelCase__ :str = num_groups
def __a ( self :Optional[int] ):
UpperCamelCase__ :str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __a ( self :List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __a ( self :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Dict , lowerCamelCase__ :str ):
UpperCamelCase__ :List[Any] = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
UpperCamelCase__ :Tuple = self.num_labels
UpperCamelCase__ :int = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :int = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Tuple = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :int = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self :Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_snake_case : Optional[int] = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
_snake_case : str = False
_snake_case : Any = False
_snake_case : Tuple = False
_snake_case : Dict = False
_snake_case : Optional[Any] = False
def __a ( self :str ):
UpperCamelCase__ :List[str] = BitModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def __a ( self :List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self :Any ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def __a ( self :List[str] ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def __a ( self :int ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def __a ( self :Any ):
pass
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :str = [*signature.parameters.keys()]
UpperCamelCase__ :Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :int = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __a ( self :Tuple ):
def check_hidden_states_output(lowerCamelCase__ :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Any ):
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ :Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Optional[int] = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ :Tuple = layer_type
UpperCamelCase__ :Optional[int] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Dict = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def __a ( self :Dict ):
pass
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :str = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> Tuple:
UpperCamelCase__ :Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Any ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __a ( self :Any ):
UpperCamelCase__ :Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
UpperCamelCase__ :Dict = self.default_image_processor
UpperCamelCase__ :Union[str, Any] = prepare_img()
UpperCamelCase__ :List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Any = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (BitBackbone,) if is_torch_available() else ()
_snake_case : List[Any] = BitConfig
_snake_case : List[str] = False
def __a ( self :Dict ):
UpperCamelCase__ :Tuple = BitModelTester(self )
| 45 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Output 1 only when both inputs are 0, per the NOR truth table.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
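# NOR is functionally complete: NOT, OR and AND can all be built from it. A
# small illustrative sketch (not part of the original script):
def not_gate(a: int) -> int:
    return nor_gate(a, a)


def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))


def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]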
| 45 | 1 |
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
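# Hypothetical invocation (the script's file name is an assumption, not given here):
#
#   python check_self_hosted_runner.py --target_runners runner-a,runner-b --token <GITHUB_TOKEN>
#
# Note that the GitHub REST API paginates /actions/runners responses, so with
# many registered runners the single curl call above may only see the first page.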
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
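# Illustrative sketch of what the assertions above check (the exact rounding
# rule is an assumption): GLPN's processor is expected to shrink each spatial
# dimension to a multiple of size_divisor, e.g. by flooring.
def round_down_to_multiple(dim, size_divisor=32):
    return (dim // size_divisor) * size_divisor


assert round_down_to_multiple(479) == 448
assert round_down_to_multiple(479) % 32 == 0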
| 45 | 1 |
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
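# Illustrative note (not part of the original script): "ratio_char_token" is a
# characters-per-token measure of how densely the tokenizer packs source code;
# e.g. a 1200-character file tokenized into 400 ids has a ratio of 3.0. Higher
# ratios mean better compression of the raw text.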
| 45 |
import math


def res(x: int, y: int) -> float:
    """Return a value that orders x**y without computing the power.

    For positive x and y this is the base-10 logarithm of x**y; the zero cases
    return the value of the power itself (0 or 1) rather than its logarithm.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 45 | 1 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
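# The recursive chain above counts the ways to make `x` pence from the UK coin
# set (1, 2, 5, 10, 20, 50, 100, 200) -- the classic coin-partition problem.
# A bottom-up sketch of the same count (an illustrative alternative, not part
# of the original solution):
def count_ways(target: int = 200, coins: tuple = (1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert count_ways(5) == 4  # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1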
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
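# The loop above evaluates the Chudnovsky series, which (in LaTeX notation) is
#
#   \frac{1}{\pi} = 12 \sum_{k=0}^{\infty}
#       \frac{(-1)^k (6k)! \, (13591409 + 545140134 k)}{(3k)! \, (k!)^3 \, 640320^{3k + 3/2}}
#
# The constants map directly onto the code: 426880\sqrt{10005} = 640320^{3/2}/12,
# -262537412640768000 = -640320^3, and each term contributes roughly 14 new
# digits, which is why the iteration count is ceil(precision / 14).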
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 45 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any]=13 , lowerCamelCase__ :List[Any]=7 , lowerCamelCase__ :int=6 , lowerCamelCase__ :Union[str, Any]=17 , lowerCamelCase__ :Any=23 , lowerCamelCase__ :str=11 , lowerCamelCase__ :List[Any]=True , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :List[Any] = seq_length
UpperCamelCase__ :Any = act_dim
UpperCamelCase__ :str = state_dim
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :List[str] = max_length
UpperCamelCase__ :List[str] = is_training
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Tuple = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCamelCase__ :Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCamelCase__ :str = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase__ :int = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCamelCase__ :Any = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
UpperCamelCase__ :List[Any] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCamelCase__ :Union[str, Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self :Optional[Any] ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Union[str, Any] , ):
UpperCamelCase__ :List[str] = DecisionTransformerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Any = model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def __a ( self :List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Any = (DecisionTransformerModel,) if is_torch_available() else ()
_snake_case : List[str] = ()
_snake_case : Any = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_snake_case : int = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_snake_case : List[Any] = False
_snake_case : Any = False
_snake_case : List[Any] = False
_snake_case : List[Any] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
_snake_case : List[Any] = False
_snake_case : Union[str, Any] = False
_snake_case : int = False
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Tuple = DecisionTransformerModelTester(self )
UpperCamelCase__ :Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Optional[Any] ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = DecisionTransformerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :int = model_class(lowerCamelCase__ )
UpperCamelCase__ :Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ :Optional[Any] = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = 2 # number of steps of autoregressive prediction we will perform
UpperCamelCase__ :Tuple = 10 # defined by the RL environment, may be normalized
UpperCamelCase__ :Union[str, Any] = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
UpperCamelCase__ :List[Any] = model.to(lowerCamelCase__ )
UpperCamelCase__ :int = model.config
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase__ , dtype=torch.floataa ) # env.reset()
UpperCamelCase__ :Optional[int] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = torch.tensor(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCamelCase__ :List[str] = state
UpperCamelCase__ :str = torch.zeros(1 , 0 , config.act_dim , device=lowerCamelCase__ , dtype=torch.floataa )
UpperCamelCase__ :Dict = torch.zeros(1 , 0 , device=lowerCamelCase__ , dtype=torch.floataa )
UpperCamelCase__ :List[Any] = torch.tensor(0 , device=lowerCamelCase__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCamelCase__ ):
UpperCamelCase__ :int = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCamelCase__ )] , dim=1 )
UpperCamelCase__ :Any = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCamelCase__ )] , dim=1 )
UpperCamelCase__ :List[Any] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=lowerCamelCase__ , actions=lowerCamelCase__ , rewards=lowerCamelCase__ , returns_to_go=lowerCamelCase__ , timesteps=lowerCamelCase__ , attention_mask=lowerCamelCase__ , return_dict=lowerCamelCase__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=lowerCamelCase__ , dtype=torch.floataa ),
                1.0,
                False,
                {},
            )
UpperCamelCase__ :List[str] = action_pred[0, -1]
UpperCamelCase__ :List[Any] = torch.cat([states, state] , dim=1 )
UpperCamelCase__ :Any = returns_to_go[0, -1] - reward
UpperCamelCase__ :Any = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCamelCase__ :Dict = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCamelCase__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
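        # For multiple-choice models, tile every tensor input across a new num_choices dimension.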
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
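        # Minimal dummy text input: two token ids, each paired with an (x0, y0, x1, y1) bounding box.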
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
UpperCamelCase = "▁"
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Any = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ["""input_ids""", """attention_mask"""]
_snake_case : Tuple = BarthezTokenizer
def __init__( self :Union[str, Any] , lowerCamelCase__ :str=None , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :List[Any]="<s>" , lowerCamelCase__ :Optional[int]="</s>" , lowerCamelCase__ :Dict="</s>" , lowerCamelCase__ :Optional[int]="<s>" , lowerCamelCase__ :List[Any]="<unk>" , lowerCamelCase__ :Union[str, Any]="<pad>" , lowerCamelCase__ :int="<mask>" , **lowerCamelCase__ :Any , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase__ :int = vocab_file
        UpperCamelCase__ :Optional[int] = bool(self.vocab_file )
def __a ( self :Optional[Any] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ :List[Any] = [self.cls_token_id]
UpperCamelCase__ :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self :List[Any] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :Union[str, Any] = [self.sep_token_id]
UpperCamelCase__ :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase__ :Any = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
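# A minimal usage sketch (not part of the original file; it assumes the class above is exported as
# BarthezTokenizerFast and that one of the pretrained checkpoints listed above is reachable):
# tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# ids = tokenizer("Transformers, qu'est-ce que c'est ?").input_ids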
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
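    # Stack the per-example pixel tensors into one batch tensor; the Trainer feeds this dict to the model.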
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
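        # Worked example (assumed values): per-device batch 8 x grad accumulation 4 x world size 2
        # gives a total batch of 64, so 1e-3 * 64 / 256 = 2.5e-4.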
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
class EditDistance:
    """
    Levenshtein edit distance between two words, solved both with a memoized
    top-down DP and an iterative bottom-up DP.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
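# Quick sanity check (classic textbook pair, unit costs assumed):
# EditDistance().min_dist_bottom_up("intention", "execution") == 5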
if __name__ == "__main__":
UpperCamelCase = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
UpperCamelCase = input("Enter the first string: ").strip()
UpperCamelCase = input("Enter the second string: ").strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 45 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` via a sieve of Eratosthenes over the odd numbers."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` expressible as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Starting at i + length skips runs that cannot beat the best length found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
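# For the default ceiling of 1_000_000 this evaluates to 997651, the sum of the
# 543 consecutive primes starting at 7.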
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCamelCase = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = """maskformer"""
_snake_case : Any = {"""hidden_size""": """mask_feature_size"""}
_snake_case : Union[str, Any] = ["""resnet""", """swin"""]
_snake_case : List[str] = ["""detr"""]
def __init__( self :Any , lowerCamelCase__ :int = 2_56 , lowerCamelCase__ :int = 2_56 , lowerCamelCase__ :float = 0.1 , lowerCamelCase__ :bool = False , lowerCamelCase__ :Optional[Dict] = None , lowerCamelCase__ :Optional[Dict] = None , lowerCamelCase__ :float = 0.02 , lowerCamelCase__ :float = 1.0 , lowerCamelCase__ :float = 1.0 , lowerCamelCase__ :float = 1.0 , lowerCamelCase__ :float = 20.0 , lowerCamelCase__ :Optional[bool] = None , **lowerCamelCase__ :str , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCamelCase__ :Optional[Any] = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = backbone_config.pop("""model_type""" )
UpperCamelCase__ :Any = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ :Optional[int] = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCamelCase__ :Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCamelCase__ :Optional[Any] = (
decoder_config.pop("""model_type""" ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :Any = CONFIG_MAPPING[decoder_type]
UpperCamelCase__ :Optional[Any] = config_class.from_dict(lowerCamelCase__ )
UpperCamelCase__ :Tuple = backbone_config
UpperCamelCase__ :Optional[int] = decoder_config
# main feature dimension for the model
UpperCamelCase__ :Union[str, Any] = fpn_feature_size
UpperCamelCase__ :int = mask_feature_size
# initializer
UpperCamelCase__ :Union[str, Any] = init_std
UpperCamelCase__ :Tuple = init_xavier_std
# Hungarian matcher && loss
UpperCamelCase__ :Dict = cross_entropy_weight
UpperCamelCase__ :int = dice_weight
UpperCamelCase__ :Optional[Any] = mask_weight
UpperCamelCase__ :int = use_auxiliary_loss
UpperCamelCase__ :List[str] = no_object_weight
UpperCamelCase__ :Any = output_auxiliary_logits
UpperCamelCase__ :Union[str, Any] = self.decoder_config.encoder_attention_heads
UpperCamelCase__ :Any = self.decoder_config.num_hidden_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :PretrainedConfig , lowerCamelCase__ :PretrainedConfig , **lowerCamelCase__ :List[Any] ):
return cls(
backbone_config=lowerCamelCase__ , decoder_config=lowerCamelCase__ , **lowerCamelCase__ , )
def __a ( self :List[Any] ):
UpperCamelCase__ :List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase__ :Dict = self.backbone_config.to_dict()
UpperCamelCase__ :str = self.decoder_config.to_dict()
UpperCamelCase__ :Optional[int] = self.__class__.model_type
return output
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 1 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
UpperCamelCase = 6_378_137.0
UpperCamelCase = 6_356_752.314_245
UpperCamelCase = 6_378_137
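# WGS-84 ellipsoid constants, all in metres: the semi-major axis (AXIS_A), the semi-minor
# axis (AXIS_B), and the equatorial radius used to normalise the haversine central angle.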
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
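# Rough usage sketch (illustrative coordinates, San Francisco -> New York City):
# lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
# returns the surface distance in metres, on the order of 4.1e6.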
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
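# Third-party dependencies: numpy, matplotlib, scikit-learn and xgboost
# ("pip install xgboost scikit-learn matplotlib").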
def data_handling(data: dict) -> tuple:
    # Split the sklearn bunch into its feature matrix and target vector.
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    # Load the iris dataset, split features/targets, and hold out 25% for testing.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier, evaluated on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase , UpperCamelCase , UpperCamelCase = False, False, False
@dataclass
class Audio:
    """Audio [`Feature`] to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCamelCase__ :List[Any] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
UpperCamelCase__ :int = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_27_67
UpperCamelCase__ :Optional[Any] = BytesIO(bytes() )
sf.write(lowerCamelCase__ , lowerCamelCase__ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
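# A minimal usage sketch (illustrative, not part of this file): in `datasets`,
# casting a column to `Audio` makes rows decode lazily on access.
#
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}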
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path: str, strict: bool, opset: int) -> None:
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
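# Example invocation from the repository root (the script file name is an
# assumption for illustration; the flags match the argparse setup below):
#
#   python utils/check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict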
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
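# For example:
#   >>> stooge_sort([18, 1, 0, -7, -1, 2, 2])
#   [-7, -1, 0, 1, 2, 2, 18]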
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 45 | 1 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
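# A quick usage sketch (the key below is illustrative; it must be invertible
# modulo 36 for check_determinant to pass):
#
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   ciphertext = hc.encrypt("testing hill cipher")
#   hc.decrypt(ciphertext)  # round-trips to the padded, uppercased plaintext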
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path: str, strict: bool, opset: int) -> None:
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
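# For instance (an illustrative key), with expert_idx=7:
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#       -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"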
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 45 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected number of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
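# A quick usage sketch (the ciphertext is illustrative):
#
#   shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("uryyb jbeyq")
#   # "uryyb jbeyq" is "hello world" shifted by 13, so the most likely shift
#   # should typically come out as 13 for English letter frequencies.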
| 45 | 1 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {'spearmanr': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results['spearmanr'])\n        -0.7\n        >>> print(round(results['spearmanr_pvalue'], 2))\n        0.19\n"
_CITATION = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
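# Usage mirrors the docstring examples above:
#
#   metric = datasets.load_metric("spearmanr")
#   metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
#   # -> {'spearmanr': -0.7}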
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 45 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None, do_lower_case=True,
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]",
        cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
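# A quick usage sketch (the checkpoint name comes from the maps above):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("Hello world")["input_ids"]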
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"\n    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n    Args:\n        title_sep (`str`, *optional*, defaults to `\" / \"`):\n            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n        doc_sep (`str`, *optional*, defaults to `\" // \"`):\n            Separator inserted between the text of the retrieved document and the original input when calling\n            [`RagRetriever`].\n        n_docs (`int`, *optional*, defaults to 5):\n            Number of documents to retrieve.\n        max_combined_length (`int`, *optional*, defaults to 300):\n            Max length of contextualized input returned by [`~RagRetriever.__call__`].\n        retrieval_vector_size (`int`, *optional*, defaults to 768):\n            Dimensionality of the document embeddings indexed by [`RagRetriever`].\n        retrieval_batch_size (`int`, *optional*, defaults to 8):\n            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n            [`RagRetriever`].\n        dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n            using `datasets.list_datasets()`).\n        dataset_split (`str`, *optional*, defaults to `\"train\"`)\n            Which split of the `dataset` to load.\n        index_name (`str`, *optional*, defaults to `\"compressed\"`)\n            The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n            `\"compressed\"`.\n        index_path (`str`, *optional*)\n            The path to the serialized faiss index on disk.\n        passages_path (`str`, *optional*):\n            A path to text passages compatible with the faiss index. Required if using\n            [`~models.rag.retrieval_rag.LegacyIndex`]\n        use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n            Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n        label_smoothing (`float`, *optional*, defaults to 0.0):\n            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n            in the loss calculation. If set to 0, no label smoothing is performed.\n        do_marginalize (`bool`, *optional*, defaults to `False`):\n            If `True`, the logits are marginalized over all documents by making use of\n            `torch.nn.functional.log_softmax`.\n        reduce_loss (`bool`, *optional*, defaults to `False`):\n            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n        do_deduplication (`bool`, *optional*, defaults to `True`):\n            Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n            set to `False` if used while training with distributed backend.\n        exclude_bos_score (`bool`, *optional*, defaults to `False`):\n            Whether or not to disregard the BOS token when computing the loss.\n        output_retrieved(`bool`, *optional*, defaults to `False`):\n            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n            `context_attention_mask` are returned. See returned tensors for more detail.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n    forced_eos_token_id (`int`, *optional*):\n        The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n        `eos_token_id`.\n"


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8,
        dataset="wiki_dpr", dataset_split="train", index_name="compressed",
        index_path=None, passages_path=None, use_dummy_dataset=False,
        reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False,
        use_cache=True, forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
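# Rough usage sketch (illustrative only -- the variable names, the env API, and the
# keyword defaults are assumptions, not taken from this file; it presumes the
# pipeline above is instantiated with its unet, value_function, scheduler, and env):
#   obs = env.reset()
#   denorm_actions = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)
#   next_obs, reward, terminal, _ = env.step(denorm_actions)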
| 45 | 1 |
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""")
    return factorial(n) // (factorial(k) * factorial(n - k))
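# Worked example: combinations(5, 2) = 5! // (2! * 3!) = 120 // (2 * 6) = 10.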
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 45 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
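# Illustrative checks: 121 reads the same reversed, 123 does not, and negative
# numbers are rejected before the digit loop runs.
assert is_palindrome(121) and not is_palindrome(123) and not is_palindrome(-121)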
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
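# Example invocation (the script and file names are placeholders, not from this repo):
#   python convert_funnel_checkpoint.py --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin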
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 45 |
from __future__ import annotations
def all_unique(input_list: list[int]) -> bool:
    return len(set(input_list)) == len(input_list)
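# For instance, all_unique([1, 2, 3]) is True, while all_unique([1, 2, 2]) is
# False because the duplicate collapses when the list is turned into a set.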
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 |
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
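# Note: XOR is its own inverse -- (c ^ key) ^ key == c -- which is why the
# encrypt and decrypt methods above share the same body.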
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Union[str, Any] = """efficientformer"""
def __init__( self :Dict , lowerCamelCase__ :List[int] = [3, 2, 6, 4] , lowerCamelCase__ :List[int] = [48, 96, 2_24, 4_48] , lowerCamelCase__ :List[bool] = [True, True, True, True] , lowerCamelCase__ :int = 4_48 , lowerCamelCase__ :int = 32 , lowerCamelCase__ :int = 4 , lowerCamelCase__ :int = 7 , lowerCamelCase__ :int = 5 , lowerCamelCase__ :int = 8 , lowerCamelCase__ :int = 4 , lowerCamelCase__ :float = 0.0 , lowerCamelCase__ :int = 16 , lowerCamelCase__ :int = 3 , lowerCamelCase__ :int = 3 , lowerCamelCase__ :int = 3 , lowerCamelCase__ :int = 2 , lowerCamelCase__ :int = 1 , lowerCamelCase__ :float = 0.0 , lowerCamelCase__ :int = 1 , lowerCamelCase__ :bool = True , lowerCamelCase__ :bool = True , lowerCamelCase__ :float = 1e-5 , lowerCamelCase__ :str = "gelu" , lowerCamelCase__ :float = 0.02 , lowerCamelCase__ :float = 1e-12 , lowerCamelCase__ :int = 2_24 , lowerCamelCase__ :float = 1e-05 , **lowerCamelCase__ :Optional[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :Dict = hidden_sizes
UpperCamelCase__ :str = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :Dict = initializer_range
UpperCamelCase__ :Optional[int] = layer_norm_eps
UpperCamelCase__ :List[Any] = patch_size
UpperCamelCase__ :Optional[int] = num_channels
UpperCamelCase__ :Tuple = depths
UpperCamelCase__ :Optional[Any] = mlp_expansion_ratio
UpperCamelCase__ :Dict = downsamples
UpperCamelCase__ :int = dim
UpperCamelCase__ :Dict = key_dim
UpperCamelCase__ :Optional[Any] = attention_ratio
UpperCamelCase__ :str = resolution
UpperCamelCase__ :str = pool_size
UpperCamelCase__ :str = downsample_patch_size
UpperCamelCase__ :Optional[Any] = downsample_stride
UpperCamelCase__ :Tuple = downsample_pad
UpperCamelCase__ :List[str] = drop_path_rate
UpperCamelCase__ :List[Any] = num_metaad_blocks
UpperCamelCase__ :Dict = distillation
UpperCamelCase__ :Dict = use_layer_scale
UpperCamelCase__ :str = layer_scale_init_value
UpperCamelCase__ :Union[str, Any] = image_size
UpperCamelCase__ :Tuple = batch_norm_eps
| 45 |
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = a[left], a[pivot]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
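# For example, quick_sort_random([4, 1, 3, 2], 0, 4) sorts the list in place
# to [1, 2, 3, 4]; only the pivot choice is randomized, not the result.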
def main() -> None:
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
| 45 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = GPTSwaTokenizer
_snake_case : Dict = False
_snake_case : str = True
_snake_case : int = False
def __a ( self :Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ :int = GPTSwaTokenizer(lowerCamelCase__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self :Dict , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :List[Any] = """This is a test"""
UpperCamelCase__ :Optional[Any] = """This is a test"""
return input_text, output_text
def __a ( self :Optional[Any] ):
UpperCamelCase__ :int = """<s>"""
UpperCamelCase__ :Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCamelCase__ ) , 20_00 )
def __a ( self :List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __a ( self :Any ):
UpperCamelCase__ :Union[str, Any] = GPTSwaTokenizer(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
UpperCamelCase__ :Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
UpperCamelCase__ :Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
UpperCamelCase__ :Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
# fmt: off
self.assertListEqual(
lowerCamelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Dict = GPTSwaTokenizer(lowerCamelCase__ )
UpperCamelCase__ :List[str] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
UpperCamelCase__ :Union[str, Any] = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertListEqual(tokenizer.encode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(tokenizer.decode_fast(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
UpperCamelCase__ :Tuple = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
UpperCamelCase__ :str = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowerCamelCase__ , )
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
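        # e.g. with the defaults embed_dim=64 and len(depths)=4 this gives 64 * 2**3 = 512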
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
| 45 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[Any] = """Wav2Vec2FeatureExtractor"""
_snake_case : Optional[int] = """AutoTokenizer"""
def __init__( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = self.feature_extractor
UpperCamelCase__ :Optional[int] = False
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :Any , **lowerCamelCase__ :str ):
try:
return super().from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , lowerCamelCase__ , )
UpperCamelCase__ :List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Dict = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
return cls(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
def __call__( self :Tuple , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Optional[int] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
UpperCamelCase__ :Optional[Any] = kwargs.pop("""raw_speech""" )
else:
UpperCamelCase__ :int = kwargs.pop("""audio""" , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = kwargs.pop("""sampling_rate""" , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = kwargs.pop("""text""" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
UpperCamelCase__ :Tuple = args[0]
UpperCamelCase__ :Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
UpperCamelCase__ :List[Any] = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
UpperCamelCase__ :List[Any] = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCamelCase__ :int = encodings["""input_ids"""]
return inputs
def __a ( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Optional[int] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Tuple = kwargs.pop("""input_features""" , lowerCamelCase__ )
UpperCamelCase__ :str = kwargs.pop("""labels""" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
UpperCamelCase__ :Dict = args[0]
UpperCamelCase__ :int = args[1:]
if input_features is not None:
UpperCamelCase__ :Dict = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
if labels is not None:
UpperCamelCase__ :int = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCamelCase__ :List[Any] = labels["""input_ids"""]
return input_features
def __a ( self :List[Any] , *lowerCamelCase__ :Tuple , **lowerCamelCase__ :List[Any] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :Tuple , *lowerCamelCase__ :str , **lowerCamelCase__ :List[Any] ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@contextmanager
def __a ( self :Optional[Any] ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
UpperCamelCase__ :Optional[int] = True
UpperCamelCase__ :Dict = self.tokenizer
yield
UpperCamelCase__ :int = self.feature_extractor
UpperCamelCase__ :Union[str, Any] = False
| 45 |
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("""Truth Table of NOR Gate:""")
    print("""| Input 1 | Input 2 | Output |""")
    print(f"""| 0 | 0 | {nor_gate(0, 0)} |""")
    print(f"""| 0 | 1 | {nor_gate(0, 1)} |""")
    print(f"""| 1 | 0 | {nor_gate(1, 0)} |""")
    print(f"""| 1 | 1 | {nor_gate(1, 1)} |""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 | 1 |
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
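# For reference, these last ten digits of 1**1 + 2**2 + ... + 1000**1000
# evaluate to 9110846700 (Project Euler problem 48).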
if __name__ == "__main__":
print(solution())
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
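# Note: the GLPN processor resizes images so height and width become (floor)
# multiples of `size_divisor`; the shape assertions in the tests below check exactly that.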
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 45 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers above 3 take the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
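# e.g. 25 is caught at i=5 (25 % 5 == 0), while 29 survives the only probe
# pair (5, 7) and is correctly reported prime.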
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Tuple ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __a ( self :Any ):
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 45 |
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("""This should never happen""")
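# Worked example: res(2, 10) = 10 * log10(2) ≈ 3.0103, consistent with
# 2**10 = 1024 ≈ 10**3.0103, so a larger res means a larger power.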
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 45 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
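# Helper used by the collator below: it pads (or truncates) every sequence to
# `sequence_length`, filling with `padding_value` on the chosen `padding_side`.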
def A ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[Any] ) -> Optional[Any]:
if isinstance(lowercase__ , lowercase__ ):
UpperCamelCase__ :Tuple = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ )
else:
UpperCamelCase__ :List[str] = np.full((len(lowercase__ ), sequence_length) , lowercase__ )
for i, tensor in enumerate(lowercase__ ):
if padding_side == "right":
if isinstance(lowercase__ , lowercase__ ):
UpperCamelCase__ :List[Any] = tensor[:sequence_length]
else:
UpperCamelCase__ :Dict = tensor[:sequence_length]
else:
if isinstance(lowercase__ , lowercase__ ):
UpperCamelCase__ :Optional[int] = tensor[:sequence_length]
else:
UpperCamelCase__ :Optional[int] = tensor[:sequence_length]
return out_tensor.tolist()
def A ( lowercase__ : Dict ) -> Optional[Any]:
UpperCamelCase__ :List[Any] = ord(lowercase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCamelCase__ :str = unicodedata.category(lowercase__ )
if cat.startswith("""P""" ):
return True
return False
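# e.g. "," (codepoint 44) lands in the 33-47 block above and counts as
# punctuation; "a" falls through to the Unicode category check and does not.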
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : PreTrainedTokenizerBase
_snake_case : Union[bool, str, PaddingStrategy] = True
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
_snake_case : int = -100
_snake_case : str = "pt"
def __a ( self :Any , lowerCamelCase__ :int ):
import torch
UpperCamelCase__ :str = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase__ :str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCamelCase__ :Any = self.tokenizer.pad(
lowerCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
UpperCamelCase__ :List[Any] = torch.tensor(batch["""entity_ids"""] ).shape[1]
UpperCamelCase__ :Optional[int] = self.tokenizer.padding_side
if padding_side == "right":
UpperCamelCase__ :Optional[Any] = [
list(lowerCamelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase__ )) for label in labels
]
else:
UpperCamelCase__ :Union[str, Any] = [
[self.label_pad_token_id] * (sequence_length - len(lowerCamelCase__ )) + list(lowerCamelCase__ ) for label in labels
]
UpperCamelCase__ :Optional[Any] = [feature["""ner_tags"""] for feature in features]
UpperCamelCase__ :Optional[int] = padding_tensor(lowerCamelCase__ , -1 , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [feature["""original_entity_spans"""] for feature in features]
UpperCamelCase__ :Tuple = padding_tensor(lowerCamelCase__ , (-1, -1) , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = {k: torch.tensor(lowerCamelCase__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): check whether language generation is also applicable to other models
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when a slower tokenizer is used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key: str, default: bool = False) -> bool:
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
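
# Minimal usage sketch: `strtobool` accepts "yes"/"no", "true"/"false", "1"/"0" (and a few
# more variants) and returns 1 or 0, so the flag variables below end up truthy or falsy.
# >>> os.environ["RUN_SLOW"] = "yes"
# >>> parse_flag_from_env("RUN_SLOW", default=False)
# 1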
UpperCamelCase = parse_flag_from_env("RUN_SLOW", default=False)
UpperCamelCase = parse_flag_from_env("RUN_REMOTE", default=False)
UpperCamelCase = parse_flag_from_env("RUN_LOCAL", default=True)
UpperCamelCase = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def A ( lowercase__ : Any ) -> List[Any]:
try:
import faiss # noqa
except ImportError:
UpperCamelCase__ :Optional[Any] = unittest.skip("""test requires faiss""" )(lowercase__ )
return test_case
def A ( lowercase__ : List[str] ) -> List[str]:
try:
import regex # noqa
except ImportError:
UpperCamelCase__ :List[str] = unittest.skip("""test requires regex""" )(lowercase__ )
return test_case
def A ( lowercase__ : int ) -> List[str]:
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase__ :Tuple = unittest.skip("""test requires elasticsearch""" )(lowercase__ )
return test_case
def A ( lowercase__ : Dict ) -> str:
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase__ :str = unittest.skip("""test requires sqlalchemy""" )(lowercase__ )
return test_case
def A ( lowercase__ : List[str] ) -> List[str]:
if not config.TORCH_AVAILABLE:
UpperCamelCase__ :Tuple = unittest.skip("""test requires PyTorch""" )(lowercase__ )
return test_case
def A ( lowercase__ : Dict ) -> Union[str, Any]:
if not config.TF_AVAILABLE:
UpperCamelCase__ :List[str] = unittest.skip("""test requires TensorFlow""" )(lowercase__ )
return test_case
def A ( lowercase__ : Tuple ) -> Optional[int]:
if not config.JAX_AVAILABLE:
UpperCamelCase__ :str = unittest.skip("""test requires JAX""" )(lowercase__ )
return test_case
def A ( lowercase__ : List[Any] ) -> Optional[Any]:
if not config.PIL_AVAILABLE:
UpperCamelCase__ :Optional[int] = unittest.skip("""test requires Pillow""" )(lowercase__ )
return test_case
def A ( lowercase__ : Any ) -> Any:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ : Union[str, Any] ) -> Any:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ : Optional[Any] ) -> str:
def _require_spacy_model(lowercase__ : Tuple ):
try:
import spacy # noqa F401
spacy.load(lowercase__ )
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase__ )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(lowercase__ ) )(lowercase__ )
else:
return test_case
return _require_spacy_model
def A ( lowercase__ : int ) -> List[str]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ : Dict ) -> List[str]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ : List[str] ) -> Dict:
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase__ :Optional[int] = unittest.skip("""test is slow""" )(lowercase__ )
return test_case
def A ( lowercase__ : Optional[Any] ) -> int:
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase__ :Dict = unittest.skip("""test is local""" )(lowercase__ )
return test_case
def A ( lowercase__ : Optional[Any] ) -> List[Any]:
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase__ :str = unittest.skip("""test is packaged""" )(lowercase__ )
return test_case
def A ( lowercase__ : int ) -> List[Any]:
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase__ :List[Any] = unittest.skip("""test requires remote""" )(lowercase__ )
return test_case
def A ( *lowercase__ : List[Any] ) -> str:
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(lowercase__ ) and name.startswith("""test""" ):
for decorator in decorators:
UpperCamelCase__ :Any = decorator(lowercase__ )
setattr(cls , lowercase__ , lowercase__ )
return cls
return decorate
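
# Usage sketch (assumption: the un-obfuscated name of the factory above is
# `for_all_test_methods`; `require_torch` and `slow` stand in for any decorators defined above):
# >>> @for_all_test_methods(require_torch, slow)
# ... class MyIntegrationTests(unittest.TestCase):
# ...     def test_forward(self): ...
# Every callable attribute whose name starts with "test" is wrapped by each decorator in turn.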
class RequestWouldHangIndefinitelyError(Exception):
    """simple docstring"""

    pass


class OfflineSimulationMode(Enum):
    """simple docstring"""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def A ( lowercase__ : Dict=OfflineSimulationMode.CONNECTION_FAILS , lowercase__ : Tuple=1E-16 ) -> Union[str, Any]:
UpperCamelCase__ :List[str] = requests.Session().request
def timeout_request(lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , **lowercase__ : int ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase__ :Dict = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
UpperCamelCase__ :List[str] = timeout
try:
return online_request(lowercase__ , lowercase__ , **lowercase__ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase__ :Optional[int] = url
UpperCamelCase__ :Union[str, Any] = e.args[0]
UpperCamelCase__ :List[Any] = (max_retry_error.args[0].replace("""10.255.255.1""" , f"""OfflineMock[{url}]""" ),)
UpperCamelCase__ :Optional[Any] = (max_retry_error,)
raise
def raise_connection_error(lowercase__ : Union[str, Any] , lowercase__ : Tuple , **lowercase__ : Any ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=lowercase__ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , lowercase__ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , lowercase__ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase__ ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
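
# Usage sketch (assumption: in the un-obfuscated `datasets` source this context manager is
# named `offline`):
# >>> with offline(OfflineSimulationMode.CONNECTION_FAILS):
# ...     requests.head("https://huggingface.co")  # raises requests.ConnectionError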
@contextmanager
def A ( *lowercase__ : Tuple , **lowercase__ : List[Any] ) -> Optional[int]:
UpperCamelCase__ :Any = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowercase__ , **lowercase__ ) as tmp_dir:
try:
os.chdir(lowercase__ )
yield
finally:
os.chdir(lowercase__ )
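
# Usage sketch (assumption: the un-obfuscated name is `set_current_working_directory_to_temp_dir`):
# >>> with set_current_working_directory_to_temp_dir():
# ...     Path("scratch.txt").write_text("ephemeral")  # lands in the temporary directory
# The previous working directory is restored on exit, even if the body raises.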
@contextmanager
def A ( ) -> List[Any]:
import gc
gc.collect()
UpperCamelCase__ :Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def A ( ) -> Optional[int]:
import gc
gc.collect()
UpperCamelCase__ :int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
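
# Usage sketch (assumption: the two helpers above correspond to `assert_arrow_memory_increases`
# and `assert_arrow_memory_doesnt_increase` in the un-obfuscated source):
# >>> with assert_arrow_memory_increases():
# ...     table = pa.table({"col": list(range(1_000))})  # allocates Arrow buffers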
def A ( lowercase__ : List[str] , lowercase__ : List[str] ) -> str:
return deepcopy(lowercase__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(lowercase__ ).integers(0 , 100 , 10 ).tolist()
def A ( lowercase__ : str ) -> Union[str, Any]:
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowercase__ : int , *lowercase__ : str , **lowercase__ : Optional[int] ):
try:
return func(*lowercase__ , **lowercase__ )
except HTTPError as err:
if str(lowercase__ ).startswith("""500""" ) or str(lowercase__ ).startswith("""502""" ):
pytest.xfail(str(lowercase__ ) )
raise err
return decorator.decorator(_wrapper , lowercase__ )
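
# Usage sketch (assumption: the un-obfuscated name is `xfail_if_500_502`): applied to a test
# that talks to the Hub, a transient HTTP 500/502 becomes a pytest.xfail instead of a failure.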
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = returncode
UpperCamelCase__ :Tuple = stdout
UpperCamelCase__ :List[Any] = stderr
async def A ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Dict:
while True:
UpperCamelCase__ :str = await stream.readline()
if line:
callback(lowercase__ )
else:
break
async def A ( lowercase__ : Optional[Any] , lowercase__ : Tuple=None , lowercase__ : Union[str, Any]=None , lowercase__ : Dict=None , lowercase__ : Optional[Any]=False , lowercase__ : List[Any]=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(lowercase__ ) )
UpperCamelCase__ :int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowercase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowercase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase__ :Dict = []
UpperCamelCase__ :int = []
def tee(lowercase__ : int , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Tuple="" ):
UpperCamelCase__ :int = line.decode("""utf-8""" ).rstrip()
sink.append(lowercase__ )
if not quiet:
print(lowercase__ , lowercase__ , file=lowercase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda lowercase__ : tee(lowercase__ , lowercase__ , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda lowercase__ : tee(lowercase__ , lowercase__ , sys.stderr , label="""stderr:""" ) ),
] , timeout=lowercase__ , )
return _RunOutput(await p.wait() , lowercase__ , lowercase__ )
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=180 , lowercase__ : Any=False , lowercase__ : Optional[Any]=True ) -> _RunOutput:
UpperCamelCase__ :Optional[Any] = asyncio.get_event_loop()
UpperCamelCase__ :Optional[int] = loop.run_until_complete(
_stream_subprocess(lowercase__ , env=lowercase__ , stdin=lowercase__ , timeout=lowercase__ , quiet=lowercase__ , echo=lowercase__ ) )
UpperCamelCase__ :Tuple = """ """.join(lowercase__ )
if result.returncode > 0:
UpperCamelCase__ :str = """\n""".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
return result
def A ( ) -> Optional[int]:
UpperCamelCase__ :str = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
UpperCamelCase__ :Tuple = re.sub(r"""^gw""" , """""" , lowercase__ , 0 , re.M )
return int(lowercase__ )
def A ( ) -> str:
UpperCamelCase__ :Optional[Any] = 2_9500
UpperCamelCase__ :List[str] = pytest_xdist_worker_id()
return port + uniq_delta
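
# Worked example: pytest-xdist sets PYTEST_XDIST_WORKER to "gw<N>", so worker "gw3" yields a
# delta of 3 and port 29_500 + 3 = 29_503; without xdist the default "gw0" maps to 29_500.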
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 45 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def A ( lowercase__ : Optional[Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[Any]:
UpperCamelCase__ :Tuple = os.path.abspath(lowercase__ )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
UpperCamelCase__ :Any = tf.train.list_variables(lowercase__ )
UpperCamelCase__ :Optional[Any] = []
UpperCamelCase__ :List[str] = []
UpperCamelCase__ :Tuple = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCamelCase__ :Tuple = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCamelCase__ :str = name[1:]
# figure out how many levels deep the name is
UpperCamelCase__ :Union[str, Any] = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowercase__ )
# read data
UpperCamelCase__ :List[str] = tf.train.load_variable(lowercase__ , lowercase__ )
names.append("""/""".join(lowercase__ ) )
arrays.append(lowercase__ )
logger.info(f"""Read a total of {len(lowercase__ ):,} layers""" )
# Sanity check
if len(set(lowercase__ ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(lowercase__ ) )})""" )
UpperCamelCase__ :Dict = list(set(lowercase__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowercase__ , lowercase__ ):
UpperCamelCase__ :str = full_name.split("""/""" )
UpperCamelCase__ :Union[str, Any] = model
UpperCamelCase__ :Optional[Any] = []
for i, m_name in enumerate(lowercase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
UpperCamelCase__ :Optional[Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """embeddings""" )
UpperCamelCase__ :List[str] = getattr(lowercase__ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
UpperCamelCase__ :Dict = getattr(lowercase__ , """encoder""" )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """layer""" )
UpperCamelCase__ :List[Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """pooler""" )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
UpperCamelCase__ :str = getattr(lowercase__ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
UpperCamelCase__ :int = getattr(lowercase__ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
UpperCamelCase__ :Dict = getattr(lowercase__ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
UpperCamelCase__ :int = getattr(lowercase__ , """token_type_embeddings""" )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append("""weight""" )
UpperCamelCase__ :Union[str, Any] = getattr(lowercase__ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """attention""" )
UpperCamelCase__ :int = getattr(lowercase__ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """attention""" )
UpperCamelCase__ :List[Any] = getattr(lowercase__ , """output""" )
UpperCamelCase__ :List[str] = getattr(lowercase__ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
UpperCamelCase__ :Dict = getattr(lowercase__ , """attention""" )
UpperCamelCase__ :List[Any] = getattr(lowercase__ , """output""" )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
UpperCamelCase__ :Tuple = getattr(lowercase__ , """output""" )
UpperCamelCase__ :List[Any] = getattr(lowercase__ , """dense""" )
elif m_name == "_output_layer_norm":
                # output LayerNorm
trace.extend(["""output""", """LayerNorm"""] )
UpperCamelCase__ :List[str] = getattr(lowercase__ , """output""" )
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
UpperCamelCase__ :List[str] = getattr(lowercase__ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
UpperCamelCase__ :int = getattr(lowercase__ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , """intermediate""" )
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , """dense""" )
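            # NOTE: the next branch repeats the "_output_layer_norm" test from above, so it is
            # unreachable as written.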
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
UpperCamelCase__ :int = getattr(lowercase__ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
UpperCamelCase__ :Any = getattr(lowercase__ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
UpperCamelCase__ :Optional[Any] = getattr(lowercase__ , """weight""" )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
UpperCamelCase__ :int = """.""".join(lowercase__ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase__ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase__ ):
UpperCamelCase__ :Tuple = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCamelCase__ :Union[str, Any] = array.transpose()
if pointer.shape == array.shape:
UpperCamelCase__ :Tuple = torch.from_numpy(lowercase__ )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def A ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
# Instantiate model
logger.info(f"""Loading model based on config from {config_path}...""" )
UpperCamelCase__ :Dict = BertConfig.from_json_file(lowercase__ )
UpperCamelCase__ :Dict = BertModel(lowercase__ )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
UpperCamelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
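    # Example invocation (hypothetical paths; the script name is assumed from the conversion
    # it performs):
    # python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./tf2_bert/bert_model.ckpt \
    #     --bert_config_file ./tf2_bert/bert_config.json \
    #     --pytorch_dump_path ./pytorch_model.bin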
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
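
    # Worked example: the pretrained checkpoint used in the integration test at the bottom of
    # this file has image_size=224 and patch_size=16, so image_seq_length = (224 // 16) ** 2 + 1
    # = 197; with the 2 text tokens fed there, the total sequence length is 199, matching the
    # expected (1, 199, 768) hidden-state shape asserted in that test.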
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will raise an error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
    _snake_case : str = field(
        default=lowercase , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."""
            )
        } , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
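The script above rescales the base learning rate linearly with the effective batch size (the MAE convention). A small sketch of that rule; the function name is illustrative:

def absolute_learning_rate(base_lr: float, per_device_batch: int, grad_accum_steps: int, world_size: int) -> float:
    # absolute_lr = base_lr * total_batch_size / 256
    total_train_batch_size = per_device_batch * grad_accum_steps * world_size
    return base_lr * total_train_batch_size / 256

# e.g. base 1e-3 at an effective batch of 256 * 4 * 4 = 4096 gives 1.6e-2
assert abs(absolute_learning_rate(1e-3, 256, 4, 4) - 1.6e-2) < 1e-12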
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def A ( lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : List[str] ) -> Tuple:
UpperCamelCase__ :List[str] = tmp_path / """cache"""
UpperCamelCase__ :Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :int = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_json_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def A ( lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> Tuple:
UpperCamelCase__ :Tuple = tmp_path / """cache"""
UpperCamelCase__ :Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :Union[str, Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_json_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> List[str]:
UpperCamelCase__ :Union[str, Any] = tmp_path / """cache"""
UpperCamelCase__ :List[str] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :Dict = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :str = JsonDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read()
assert isinstance(lowercase__ , lowercase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A ( lowercase__ : Optional[Any] , lowercase__ : int ) -> Dict:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCamelCase__ :int = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
UpperCamelCase__ :Tuple = features.copy()
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = tmp_path / """cache"""
UpperCamelCase__ :Any = JsonDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read()
assert isinstance(lowercase__ , lowercase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def A ( lowercase__ : str , lowercase__ : Any , lowercase__ : Optional[Any] ) -> Any:
UpperCamelCase__ :Tuple = tmp_path / """cache"""
UpperCamelCase__ :Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :Any = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ , split=lowercase__ ).read()
_check_json_dataset(lowercase__ , lowercase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def A ( lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
if issubclass(lowercase__ , lowercase__ ):
UpperCamelCase__ :Any = jsonl_path
elif issubclass(lowercase__ , lowercase__ ):
UpperCamelCase__ :List[str] = [jsonl_path]
UpperCamelCase__ :Optional[int] = tmp_path / """cache"""
UpperCamelCase__ :str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :List[Any] = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_json_dataset(lowercase__ , lowercase__ )
def _check_json_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
UpperCamelCase__ :Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def A ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> Optional[Any]:
UpperCamelCase__ :int = tmp_path / """cache"""
UpperCamelCase__ :List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :List[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def A ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : List[Any] ) -> List[Any]:
UpperCamelCase__ :int = tmp_path / """cache"""
UpperCamelCase__ :Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :str = features.copy() if features else default_expected_features
UpperCamelCase__ :Optional[int] = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :List[Any] = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def A ( lowercase__ : Tuple , lowercase__ : str , lowercase__ : int ) -> List[Any]:
if split:
UpperCamelCase__ :Dict = {split: jsonl_path}
else:
UpperCamelCase__ :Tuple = """train"""
UpperCamelCase__ :Any = {"""train""": jsonl_path, """test""": jsonl_path}
UpperCamelCase__ :Union[str, Any] = tmp_path / """cache"""
UpperCamelCase__ :Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase__ :int = JsonDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_json_datasetdict(lowercase__ , lowercase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer ):
    return json.load(buffer )
def load_json_lines(buffer ):
    return [json.loads(line ) for line in buffer]
class lowerCAmelCase_ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :str = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :str , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[Any] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
def __a ( self :Tuple , lowerCamelCase__ :List[Any] ):
with pytest.raises(lowerCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
UpperCamelCase__ :Optional[int] = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , compression=lowerCamelCase__ ).write()
with fsspec.open(lowerCamelCase__ , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ :Union[str, Any] = f.read()
with fsspec.open(lowerCamelCase__ , """rb""" , compression="""infer""" ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content
| 45 |
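The tests above drive JsonDatasetWriter through in-memory buffers. A minimal round-trip sketch in the same spirit, assuming the datasets library is installed:

import io
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()  # one JSON object per row
    buffer.seek(0)
    lines = buffer.read().decode().splitlines()
assert len(lines) == ds.num_rows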
from __future__ import annotations
def prime_sieve(limit: int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling: int = 100_0000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
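Quick sanity checks for the sieve and the Euler 50 search above (assuming the prime_sieve/solution definitions as corrected):

primes = prime_sieve(100)
assert primes[:10] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# Below 100, the longest run of consecutive primes summing to a prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41 (six terms).
assert solution(100) == 41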
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
UpperCamelCase = "pytorch_model.bin"
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
_snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """The name of the task to train on."""} , )
_snake_case : Optional[List[str]] = dataclasses.field(
default=lowercase , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
_snake_case : Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
_snake_case : Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
_snake_case : Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
_snake_case : Optional[int] = dataclasses.field(
default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_snake_case : Optional[int] = dataclasses.field(
default=lowercase , metadata={"""help""": """Random seed for initialization."""} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_top_rows = int(eval_result * len(dataset ) )
        print(num_top_rows )
        dataset = dataset.sort("""probability""" , reverse=True )
        dataset = dataset.select(range(num_top_rows ) )
    dataset = dataset.remove_columns(["""label""", """probability"""] )
    dataset = dataset.rename_column("""prediction""" , """label""" )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f"""train_pseudo.{args.data_file_extension}""" )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : Dict , **lowercase__ : Dict ) -> Tuple:
UpperCamelCase__ :int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCamelCase__ :List[str] = STModelArguments(model_name_or_path=lowercase__ )
UpperCamelCase__ :str = STDataArguments(train_file=lowercase__ , infer_file=lowercase__ )
UpperCamelCase__ :List[Any] = STTrainingArguments(output_dir=lowercase__ )
UpperCamelCase__ :List[Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase__ ).items():
setattr(lowercase__ , lowercase__ , lowercase__ )
for key, value in kwargs.items():
if hasattr(lowercase__ , lowercase__ ):
setattr(lowercase__ , lowercase__ , lowercase__ )
# Sanity checks
UpperCamelCase__ :Tuple = {}
UpperCamelCase__ :Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCamelCase__ :Optional[Any] = args.train_file
UpperCamelCase__ :Optional[int] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCamelCase__ :Optional[Any] = args.eval_file
for key in data_files:
UpperCamelCase__ :Dict = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
UpperCamelCase__ :Any = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
    data_dir_format = f"""{args.output_dir}/self-train_iter-{{}}""".format
UpperCamelCase__ :str = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
accelerator.wait_for_everyone()
UpperCamelCase__ :Union[str, Any] = None
UpperCamelCase__ :str = None
UpperCamelCase__ :List[str] = 0
UpperCamelCase__ :Optional[int] = False
# Show the progress bar
UpperCamelCase__ :Optional[int] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCamelCase__ :int = data_dir_format(lowercase__ )
assert os.path.exists(lowercase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCamelCase__ :Tuple = os.path.join(lowercase__ , """stage-1""" )
UpperCamelCase__ :Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase__ , lowercase__ ):
arguments_dict.update({key: value} )
UpperCamelCase__ :Union[str, Any] = os.path.join(lowercase__ , """best-checkpoint""" , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , lowercase__ , lowercase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , lowercase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCamelCase__ :List[Any] = os.path.join(lowercase__ , """best-checkpoint""" )
UpperCamelCase__ :List[Any] = os.path.join(lowercase__ , """stage-2""" )
# Update arguments_dict
UpperCamelCase__ :List[Any] = model_path
UpperCamelCase__ :Any = data_files["""train"""]
UpperCamelCase__ :Optional[int] = current_output_dir
UpperCamelCase__ :str = os.path.join(lowercase__ , """best-checkpoint""" , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , lowercase__ , lowercase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , lowercase__ )
UpperCamelCase__ :Tuple = iteration
UpperCamelCase__ :List[Any] = data_dir_format(iteration + 1 )
UpperCamelCase__ :Optional[Any] = AutoConfig.from_pretrained(os.path.join(lowercase__ , """best-checkpoint""" ) )
        UpperCamelCase__ :List[str] = config.id2label
UpperCamelCase__ :Union[str, Any] = os.path.join(lowercase__ , """eval_results_best-checkpoint.json""" )
UpperCamelCase__ :Dict = os.path.join(lowercase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(lowercase__ )
with open(lowercase__ , """r""" ) as f:
UpperCamelCase__ :List[Any] = float(json.load(lowercase__ )[args.eval_metric] )
UpperCamelCase__ :Any = os.path.join(lowercase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(lowercase__ )
# Loading the dataset from local csv or json files.
UpperCamelCase__ :Any = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
UpperCamelCase__ :Optional[int] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(lowercase__ , exist_ok=lowercase__ )
shutil.copy(lowercase__ , os.path.join(lowercase__ , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(lowercase__ ):
shutil.copy(lowercase__ , os.path.join(lowercase__ , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.wait_for_everyone()
UpperCamelCase__ :List[str] = os.path.join(lowercase__ , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCamelCase__ :List[Any] = eval_result
if best_iteration is None:
UpperCamelCase__ :Union[str, Any] = new_iteration
UpperCamelCase__ :List[str] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCamelCase__ :Optional[Any] = new_iteration
UpperCamelCase__ :Optional[int] = new_eval_result
UpperCamelCase__ :Optional[int] = 0
else:
if new_eval_result == best_eval_result:
UpperCamelCase__ :str = new_iteration
UpperCamelCase__ :Dict = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCamelCase__ :Optional[int] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , lowercase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(lowercase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(lowercase__ , """eval_results_best-iteration.json""" ) , )
| 45 |
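The pseudo-labeling step above keeps only predictions whose probability clears args.confidence_threshold before relabeling them as training data. The same filter on plain dicts, as a sketch:

rows = [
    {"prediction": "pos", "probability": 0.97},
    {"prediction": "neg", "probability": 0.55},
    {"prediction": "pos", "probability": 0.81},
]
confidence_threshold = 0.8
pseudo_labeled = [{"label": r["prediction"]} for r in rows if r["probability"] > confidence_threshold]
assert len(pseudo_labeled) == 2  # only high-confidence predictions survive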
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 1 |
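The decoder-cache test above checks that an incremental pass with past_key_values reproduces the last positions of a full forward pass. A numpy sketch of just the slicing bookkeeping, with the model stubbed out so the two tensors agree by construction:

import numpy as np
rng = np.random.default_rng(0)
batch, seq, extra, hidden = 2, 7, 3, 32
# full pass over the original sequence plus 3 appended tokens
output_from_no_past = rng.normal(size=(batch, seq + extra, hidden))
# a correct KV cache makes the incremental pass match the new positions exactly
output_from_past = output_from_no_past[:, -extra:, :]
random_slice_idx = int(rng.integers(hidden))
assert np.allclose(
    output_from_no_past[:, -extra:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    atol=1e-3,
)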
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("""Principal Component Analysis computed""" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=True )
        logging.error("""Dataset empty""" )
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("""Linear Discriminant Analysis computed""" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="""%(message)s""" , force=True )
        logging.error("""Dataset empty""" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                """Did not raise AssertionError for dimensions > classes""" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
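A small usage sketch for the PCA routine above (toy data; note the features-in-rows, samples-in-columns convention the code expects):

import numpy as np
features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                     [1.1, 1.9, 3.2, 3.9, 5.1]] )  # 2 features x 5 samples
projected = principal_component_analysis(features , 1 )
assert projected.shape == (1, 5)  # one component, five samples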
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A ( lowercase__ : dict ) -> tuple:
return (data["data"], data["target"])
def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier:
UpperCamelCase__ :Tuple = XGBClassifier()
classifier.fit(lowercase__ , lowercase__ )
return classifier
def A ( ) -> None:
UpperCamelCase__ :str = load_iris()
UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 )
UpperCamelCase__ :Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
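A hedged companion sketch of the same pipeline with an explicit accuracy check appended; the accuracy step and the `random_state` seed are illustrative additions, not part of the script above.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

data = load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    data["data"], data["target"], test_size=0.25, random_state=0
)
classifier = XGBClassifier().fit(x_train, y_train)
# XGBClassifier follows the scikit-learn estimator API, so .score works
print(f"test accuracy: {classifier.score(x_test, y_test):.3f}")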
| 45 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict = """"""
_snake_case : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_snake_case : str = None # compression type in fsspec. ex: "gzip"
_snake_case : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self :Optional[Any] , lowerCamelCase__ :str = "" , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[dict] = None , **lowerCamelCase__ :Dict ):
super().__init__(self , **lowerCamelCase__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase__ :Optional[int] = fsspec.open(
lowerCamelCase__ , mode="""rb""" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCamelCase__ :str = os.path.basename(self.file.path.split("""::""" )[0] )
UpperCamelCase__ :List[Any] = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
UpperCamelCase__ :Any = None
@classmethod
def __a ( cls :Any , lowerCamelCase__ :Union[str, Any] ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase__ ).lstrip("""/""" )
def __a ( self :List[Any] ):
if self.dir_cache is None:
UpperCamelCase__ :List[str] = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
UpperCamelCase__ :Tuple = {f["""name"""]: f}
def __a ( self :List[Any] , lowerCamelCase__ :str ):
return self.file.open().read()
def __a ( self :str , lowerCamelCase__ :str , lowerCamelCase__ :str = "rb" , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :Any=True , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Any = self._strip_protocol(lowerCamelCase__ )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = """bz2"""
_snake_case : Union[str, Any] = """bz2"""
_snake_case : Tuple = """.bz2"""
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : int = """gzip"""
_snake_case : Tuple = """gzip"""
_snake_case : Optional[int] = """.gz"""
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Union[str, Any] = """lz4"""
_snake_case : str = """lz4"""
_snake_case : Tuple = """.lz4"""
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = """xz"""
_snake_case : Union[str, Any] = """xz"""
_snake_case : Optional[Any] = """.xz"""
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = """zstd"""
_snake_case : Optional[int] = """zstd"""
_snake_case : Dict = """.zst"""
def __init__( self :Optional[int] , lowerCamelCase__ :str , lowerCamelCase__ :str = "rb" , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[dict] = None , lowerCamelCase__ :int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ :Dict , ):
super().__init__(
fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ :int = self.file.__enter__
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :int = file_
def __enter__( self :str ):
self._file.__enter__()
return self
def __exit__( self :int , *lowerCamelCase__ :int , **lowerCamelCase__ :Dict ):
self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ )
def __iter__( self :Union[str, Any] ):
return iter(self._file )
def __a ( self :Union[str, Any] ):
return next(self._file )
def __getattr__( self :str , lowerCamelCase__ :Any ):
return getattr(self._file , lowerCamelCase__ )
def fixed_enter(*lowerCamelCase__ :Optional[Any] , **lowerCamelCase__ :int ):
return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) )
UpperCamelCase__ :List[Any] = fixed_enter
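The filesystems above ultimately delegate to `fsspec.open(..., compression=...)`, which can also be called directly. A minimal sketch (the file path is hypothetical):

import fsspec

# Read a gzip-compressed text file; fsspec wraps the decompressor for us
with fsspec.open("data.txt.gz", mode="rb", compression="gzip") as f:  # hypothetical path
    first_bytes = f.read(80)
print(first_bytes)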
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
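A standalone sketch of the characters-per-token ratio computed in `tokenize` above (the checkpoint name is illustrative; any tokenizer works):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative checkpoint
text = "def add(a, b):\n    return a + b\n"
input_ids = tokenizer(text, truncation=True)["input_ids"]
print(len(text) / len(input_ids))  # characters per token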
| 45 | 1 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :float = 0 ):
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = row, column
UpperCamelCase__ :Optional[Any] = [[default_value for c in range(lowerCamelCase__ )] for r in range(lowerCamelCase__ )]
def __str__( self :Union[str, Any] ):
UpperCamelCase__ :Optional[Any] = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
UpperCamelCase__ :str = 0
for row_vector in self.array:
for obj in row_vector:
UpperCamelCase__ :int = max(lowerCamelCase__ , len(str(lowerCamelCase__ ) ) )
UpperCamelCase__ :str = f"""%{max_element_length}s"""
# Make string and return
def single_line(lowerCamelCase__ :list[float] ) -> str:
nonlocal string_format_identifier
UpperCamelCase__ :Optional[int] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase__ ) for row_vector in self.array )
return s
def __repr__( self :Optional[Any] ):
return str(self )
def __a ( self :str , lowerCamelCase__ :tuple[int, int] ):
if not (isinstance(lowerCamelCase__ , (list, tuple) ) and len(lowerCamelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self :Optional[int] , lowerCamelCase__ :tuple[int, int] ):
assert self.validate_indicies(lowerCamelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self :int , lowerCamelCase__ :tuple[int, int] , lowerCamelCase__ :float ):
assert self.validate_indicies(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = value
def __add__( self :int , lowerCamelCase__ :Matrix ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert self.row == another.row and self.column == another.column
# Add
UpperCamelCase__ :Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase__ :Dict = self[r, c] + another[r, c]
return result
def __neg__( self :Dict ):
UpperCamelCase__ :str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase__ :Tuple = -self[r, c]
return result
def __sub__( self :Optional[int] , lowerCamelCase__ :Matrix ):
return self + (-another)
def __mul__( self :Union[str, Any] , lowerCamelCase__ :int | float | Matrix ):
if isinstance(lowerCamelCase__ , (int, float) ): # Scalar multiplication
UpperCamelCase__ :Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase__ :Optional[int] = self[r, c] * another
return result
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): # Matrix multiplication
assert self.column == another.row
UpperCamelCase__ :Optional[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCamelCase__ :List[str] = f"""Unsupported type given for another ({type(lowerCamelCase__ )})"""
raise TypeError(lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Optional[int] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCamelCase__ :Tuple = self[r, c]
return result
def __a ( self :Tuple , lowerCamelCase__ :Matrix , lowerCamelCase__ :Matrix ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCamelCase__ :Optional[int] = v.transpose()
UpperCamelCase__ :Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def A ( ) -> None:
# a^(-1)
UpperCamelCase__ :List[Any] = Matrix(3 , 3 , 0 )
for i in range(3 ):
UpperCamelCase__ :Optional[Any] = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
UpperCamelCase__ :List[Any] = Matrix(3 , 1 , 0 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = 1, 2, -3
UpperCamelCase__ :Tuple = Matrix(3 , 1 , 0 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase__ , lowercase__ )}""" )
def A ( ) -> None:
import doctest
doctest.testmod()
testa()
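A hedged numpy cross-check of the Sherman-Morrison update the class implements, using the same u and v as the test above: (A + uv^T)^(-1) = A^(-1) - A^(-1) u v^T A^(-1) / (1 + v^T A^(-1) u).

import numpy as np

A = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
ainv = np.linalg.inv(A)
denominator = 1.0 + (v.T @ ainv @ u)[0, 0]
sm_update = ainv - (ainv @ u @ v.T @ ainv) / denominator
# The closed-form update must match inverting the rank-one-perturbed matrix
assert np.allclose(sm_update, np.linalg.inv(A + u @ v.T))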
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :Optional[int] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
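Stooge sort recursively sorts the first two thirds, the last two thirds, then the first two thirds again, which gives roughly O(n^2.7) time. A quick sanity check, assuming the public entry point is named `stooge_sort` as the print call above expects:

assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []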
| 45 | 1 |
def A ( lowercase__ : List[Any] , lowercase__ : str ) -> List[str]:
UpperCamelCase__ :int = 0
UpperCamelCase__ :Any = len(lowercase__ ) - 1
while left <= right:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase__ :Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase__ ):
return None
UpperCamelCase__ :List[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCamelCase__ :List[Any] = left
UpperCamelCase__ :Tuple = point
elif point > right:
UpperCamelCase__ :str = right
UpperCamelCase__ :Tuple = point
else:
if item < current_item:
UpperCamelCase__ :Dict = point - 1
else:
UpperCamelCase__ :List[Any] = point + 1
return None
def A ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Optional[int] ) -> str:
# avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCamelCase__ :Tuple = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
elif point > right:
return interpolation_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowercase__ , lowercase__ , lowercase__ , point - 1 )
else:
return interpolation_search_by_recursion(
lowercase__ , lowercase__ , point + 1 , lowercase__ )
def A ( lowercase__ : int ) -> str:
if collection != sorted(lowercase__ ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
UpperCamelCase = 0
if debug == 1:
UpperCamelCase = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
UpperCamelCase = 67
UpperCamelCase = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("Not found")
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
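A hedged sketch of the core traversal: loading a SavedModel protobuf directly and collecting the op names from its graphs (the model path is hypothetical).

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

saved_model = SavedModel()
with open("model/saved_model.pb", "rb") as f:  # hypothetical path
    saved_model.ParseFromString(f.read())

# One SavedModel can hold several meta graphs; gather ops across all of them
op_names = {node.op for mg in saved_model.meta_graphs for node in mg.graph_def.node}
print(sorted(op_names))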
| 45 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int]=13 , lowerCamelCase__ :str=32 , lowerCamelCase__ :Optional[Any]=3 , lowerCamelCase__ :Union[str, Any]=4 , lowerCamelCase__ :str=[10, 20, 30, 40] , lowerCamelCase__ :Any=[2, 2, 3, 2] , lowerCamelCase__ :List[str]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Tuple=37 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Optional[Any]=10 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :int=["stage2", "stage3", "stage4"] , lowerCamelCase__ :Optional[int]=[2, 3, 4] , lowerCamelCase__ :Optional[int]=None , ):
UpperCamelCase__ :Union[str, Any] = parent
UpperCamelCase__ :List[str] = batch_size
UpperCamelCase__ :Dict = image_size
UpperCamelCase__ :Tuple = num_channels
UpperCamelCase__ :Tuple = num_stages
UpperCamelCase__ :Optional[Any] = hidden_sizes
UpperCamelCase__ :Optional[int] = depths
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :Union[str, Any] = use_labels
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :Optional[Any] = num_labels
UpperCamelCase__ :Optional[Any] = initializer_range
UpperCamelCase__ :Union[str, Any] = out_features
UpperCamelCase__ :Any = out_indices
UpperCamelCase__ :int = scope
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __a ( self :List[str] ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :List[Any] = ConvNextModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Union[str, Any] = ConvNextForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :str = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :List[str] = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ :List[Any] = None
UpperCamelCase__ :Optional[Any] = ConvNextBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :int = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[int] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = config_and_inputs
UpperCamelCase__ :str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case : Dict = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case : Any = True
_snake_case : Dict = False
_snake_case : Tuple = False
_snake_case : str = False
_snake_case : Any = False
def __a ( self :str ):
UpperCamelCase__ :List[str] = ConvNextModelTester(self )
UpperCamelCase__ :Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def __a ( self :int ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self :List[str] ):
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def __a ( self :Dict ):
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def __a ( self :List[str] ):
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def __a ( self :Dict ):
pass
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Tuple = [*signature.parameters.keys()]
UpperCamelCase__ :Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def __a ( self :int ):
def check_hidden_states_output(lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict ):
UpperCamelCase__ :List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :int = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ :Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ :Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Optional[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def __a ( self :Any ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :str = ConvNextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> str:
UpperCamelCase__ :Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[int] ):
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Dict = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :int = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase , lowercase ):
"""simple docstring"""
_snake_case : List[Any] = (ConvNextBackbone,) if is_torch_available() else ()
_snake_case : Dict = ConvNextConfig
_snake_case : Any = False
def __a ( self :Dict ):
UpperCamelCase__ :Union[str, Any] = ConvNextModelTester(self )
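A hedged inference sketch matching the integration test above; the checkpoint name comes from the test itself, while the image path is illustrative.

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
image = Image.open("cats.png")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])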
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Relative frequencies of letters in the English language
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add this letter's contribution to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add this letter's contribution to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
UpperCamelCase__ , UpperCamelCase__ :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
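For reference, the textbook chi squared statistic the routine above approximates, shown standalone (hedged sketch; note the code above multiplies each frequency by that letter's own count, whereas the textbook expected count is frequency times message length):

def chi_squared(decoded: str, frequencies: dict) -> float:
    total = 0.0
    for letter, frequency in frequencies.items():
        observed = decoded.count(letter)
        expected = frequency * len(decoded)
        # Each letter contributes (observed - expected)^2 / expected
        total += (observed - expected) ** 2 / expected
    return total

print(chi_squared("hello", {"e": 0.111, "h": 0.061, "l": 0.040, "o": 0.075}))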
| 45 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase = logging.get_logger(__name__)
def A ( lowercase__ : Optional[Any] , lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[int]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def A ( lowercase__ : np.ndarray , lowercase__ : Optional[str] , lowercase__ : Optional[str] ) -> int:
UpperCamelCase__ :Dict = to_pil_image(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = pil_image.size
UpperCamelCase__ :Union[str, Any] = pytesseract.image_to_data(lowercase__ , lang=lowercase__ , output_type="""dict""" , config=lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
UpperCamelCase__ :Optional[int] = [idx for idx, word in enumerate(lowercase__ ) if not word.strip()]
UpperCamelCase__ :List[Any] = [word for idx, word in enumerate(lowercase__ ) if idx not in irrelevant_indices]
UpperCamelCase__ :List[str] = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
UpperCamelCase__ :str = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
UpperCamelCase__ :List[str] = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
UpperCamelCase__ :Tuple = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase__ :Optional[int] = []
for x, y, w, h in zip(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
UpperCamelCase__ :Tuple = [x, y, x + w, y + h]
actual_boxes.append(lowercase__ )
# finally, normalize the bounding boxes
UpperCamelCase__ :int = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase__ , lowercase__ , lowercase__ ) )
assert len(lowercase__ ) == len(lowercase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = ["""pixel_values"""]
def __init__( self :Optional[Any] , lowerCamelCase__ :bool = True , lowerCamelCase__ :Dict[str, int] = None , lowerCamelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ :bool = True , lowerCamelCase__ :float = 1 / 2_55 , lowerCamelCase__ :bool = True , lowerCamelCase__ :Union[float, Iterable[float]] = None , lowerCamelCase__ :Union[float, Iterable[float]] = None , lowerCamelCase__ :bool = True , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[str] = "" , **lowerCamelCase__ :int , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :str = size if size is not None else {"""height""": 2_24, """width""": 2_24}
UpperCamelCase__ :Union[str, Any] = get_size_dict(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Any = resample
UpperCamelCase__ :int = do_rescale
UpperCamelCase__ :Optional[int] = rescale_value
UpperCamelCase__ :Union[str, Any] = do_normalize
UpperCamelCase__ :int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ :Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCamelCase__ :Optional[int] = apply_ocr
UpperCamelCase__ :Tuple = ocr_lang
UpperCamelCase__ :List[Any] = tesseract_config
def __a ( self :Optional[int] , lowerCamelCase__ :np.ndarray , lowerCamelCase__ :Dict[str, int] , lowerCamelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCamelCase__ :Any = (size["""height"""], size["""width"""])
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :int , lowerCamelCase__ :np.ndarray , lowerCamelCase__ :Union[int, float] , lowerCamelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ :int , ):
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :Union[str, Any] , lowerCamelCase__ :np.ndarray , lowerCamelCase__ :Union[float, Iterable[float]] , lowerCamelCase__ :Union[float, Iterable[float]] , lowerCamelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ :Optional[int] , ):
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :Optional[Any] , lowerCamelCase__ :ImageInput , lowerCamelCase__ :bool = None , lowerCamelCase__ :Dict[str, int] = None , lowerCamelCase__ :Tuple=None , lowerCamelCase__ :bool = None , lowerCamelCase__ :float = None , lowerCamelCase__ :bool = None , lowerCamelCase__ :Union[float, Iterable[float]] = None , lowerCamelCase__ :Union[float, Iterable[float]] = None , lowerCamelCase__ :bool = None , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[Union[str, TensorType]] = None , lowerCamelCase__ :ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ :List[Any] , ):
UpperCamelCase__ :str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ :str = size if size is not None else self.size
UpperCamelCase__ :Any = get_size_dict(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = resample if resample is not None else self.resample
UpperCamelCase__ :Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ :Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ :str = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ :str = image_std if image_std is not None else self.image_std
UpperCamelCase__ :List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__ :Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__ :Any = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__ :Dict = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
UpperCamelCase__ :List[str] = [to_numpy_array(lowerCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
UpperCamelCase__ :Tuple = []
UpperCamelCase__ :List[Any] = []
for image in images:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = apply_tesseract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
UpperCamelCase__ :List[Any] = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase__ :Any = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase__ :str = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
UpperCamelCase__ :int = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
UpperCamelCase__ :int = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowerCamelCase__ )
if apply_ocr:
UpperCamelCase__ :Optional[Any] = words_batch
UpperCamelCase__ :Dict = boxes_batch
return data
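The box normalization used by `apply_tesseract` above, shown standalone: pixel boxes in (left, top, right, bottom) order are mapped into the 0-1000 coordinate space LayoutLM expects.

def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([50, 100, 150, 200], width=500, height=400))  # [100, 250, 300, 500]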
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[Any] = """vit_msn"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Optional[int]=7_68 , lowerCamelCase__ :List[str]=12 , lowerCamelCase__ :Optional[Any]=12 , lowerCamelCase__ :Tuple=30_72 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Any=0.0 , lowerCamelCase__ :List[str]=0.0 , lowerCamelCase__ :str=0.02 , lowerCamelCase__ :str=1e-06 , lowerCamelCase__ :Tuple=2_24 , lowerCamelCase__ :Any=16 , lowerCamelCase__ :int=3 , lowerCamelCase__ :List[str]=True , **lowerCamelCase__ :Union[str, Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :str = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :Optional[int] = hidden_act
UpperCamelCase__ :Any = hidden_dropout_prob
UpperCamelCase__ :str = attention_probs_dropout_prob
UpperCamelCase__ :Optional[Any] = initializer_range
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Any = image_size
UpperCamelCase__ :Tuple = patch_size
UpperCamelCase__ :Union[str, Any] = num_channels
UpperCamelCase__ :Tuple = qkv_bias
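A hedged usage sketch; `ViTMSNConfig` is the public name this config class carries in the transformers library, and the defaults below match the signature above.

from transformers import ViTMSNConfig

config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
print(config.num_hidden_layers)  # 12 by default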
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
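The register-then-resolve pattern these tests exercise, as a self-contained hedged sketch (the "my-model" type and the Wav2Vec2-based extractor are illustrative stand-ins for the test module's custom classes):

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyFeatureExtractor(Wav2Vec2FeatureExtractor):
    pass

# After registration, the auto-API can resolve MyConfig to MyFeatureExtractor
AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)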
| 45 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Dict , *lowerCamelCase__ :Tuple , **lowerCamelCase__ :Union[str, Any] ):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
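# --- Editor's note: `run_diffusion` above interleaves denoising with gradient
# ascent on a learned value function, scaled by the scheduler's posterior
# standard deviation. A minimal, self-contained sketch of that guidance step;
# the toy `value_fn` and the constant variance are illustrative stand-ins, not
# the pipeline's real value network or scheduler:
import torch

def value_guidance_step_sketch(x: torch.Tensor, scale: float = 0.1) -> torch.Tensor:
    value_fn = lambda t: -(t**2).sum(dim=(1, 2))  # toy value: prefer small magnitudes
    posterior_variance = torch.tensor(0.04)  # stand-in for scheduler._get_variance(t)
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = value_fn(x)
        grad = torch.autograd.grad([y.sum()], [x])[0]
    model_std = torch.exp(0.5 * posterior_variance)
    return (x + scale * model_std * grad).detach()

print(value_guidance_step_sketch(torch.randn(2, 8, 4)).shape)  # torch.Size([2, 8, 4])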
| 45 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def A ( lowercase__ : Optional[int] , lowercase__ : Tuple ) -> Optional[int]:
UpperCamelCase__ :List[Any] = XCLIPTextConfig()
# derive patch size from model name
UpperCamelCase__ :Dict = model_name.find("""patch""" )
UpperCamelCase__ :Any = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
UpperCamelCase__ :Dict = XCLIPVisionConfig(patch_size=lowercase__ , num_frames=lowercase__ )
if "large" in model_name:
UpperCamelCase__ :Dict = 768
UpperCamelCase__ :List[str] = 3072
UpperCamelCase__ :int = 12
UpperCamelCase__ :Tuple = 1024
UpperCamelCase__ :Union[str, Any] = 4096
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :Dict = 24
UpperCamelCase__ :Any = 768
UpperCamelCase__ :List[Any] = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCamelCase__ :Optional[Any] = 336
UpperCamelCase__ :List[str] = XCLIPConfig.from_text_vision_configs(lowercase__ , lowercase__ )
if "large" in model_name:
UpperCamelCase__ :List[str] = 768
return config
def A ( lowercase__ : Dict ) -> Optional[int]:
# text encoder
if name == "token_embedding.weight":
UpperCamelCase__ :str = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
UpperCamelCase__ :str = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
UpperCamelCase__ :List[Any] = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
UpperCamelCase__ :Tuple = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
UpperCamelCase__ :Optional[int] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
UpperCamelCase__ :Optional[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
UpperCamelCase__ :Union[str, Any] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
UpperCamelCase__ :int = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
UpperCamelCase__ :Optional[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
UpperCamelCase__ :Union[str, Any] = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
UpperCamelCase__ :Any = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
UpperCamelCase__ :Optional[int] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
UpperCamelCase__ :List[str] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
UpperCamelCase__ :Optional[int] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
UpperCamelCase__ :Any = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
UpperCamelCase__ :Union[str, Any] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
UpperCamelCase__ :List[Any] = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
UpperCamelCase__ :List[Any] = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
UpperCamelCase__ :Union[str, Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
UpperCamelCase__ :List[str] = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
UpperCamelCase__ :Optional[Any] = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
UpperCamelCase__ :Optional[Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def A ( lowercase__ : Optional[int] , lowercase__ : Dict ) -> Dict:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ :int = orig_state_dict.pop(lowercase__ )
if "attn.in_proj" in key:
UpperCamelCase__ :Any = key.split(""".""" )
if key.startswith("""visual""" ):
UpperCamelCase__ :str = key_split[3]
UpperCamelCase__ :str = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
                        UpperCamelCase__ :List[str] = val[:dim, :]
                        UpperCamelCase__ :Any = val[dim : dim * 2, :]
                        UpperCamelCase__ :Dict = val[-dim:, :]
                    else:
                        UpperCamelCase__ :Optional[Any] = val[:dim]
                        UpperCamelCase__ :str = val[dim : dim * 2]
                        UpperCamelCase__ :List[Any] = val[-dim:]
else:
if "weight" in key:
                        UpperCamelCase__ :List[Any] = val[:dim, :]
                        UpperCamelCase__ :Optional[Any] = val[dim : dim * 2, :]
                        UpperCamelCase__ :Union[str, Any] = val[-dim:, :]
else:
UpperCamelCase__ :int = val[:dim]
                        UpperCamelCase__ :List[Any] = val[dim : dim * 2]
UpperCamelCase__ :Any = val[-dim:]
elif key.startswith("""mit""" ):
UpperCamelCase__ :Dict = key_split[2]
UpperCamelCase__ :List[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase__ :Dict = val[:dim, :]
UpperCamelCase__ :Any = val[dim : dim * 2, :]
UpperCamelCase__ :List[str] = val[-dim:, :]
else:
UpperCamelCase__ :Union[str, Any] = val[:dim]
UpperCamelCase__ :List[str] = val[dim : dim * 2]
UpperCamelCase__ :str = val[-dim:]
else:
UpperCamelCase__ :Dict = key_split[2]
UpperCamelCase__ :Tuple = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase__ :List[str] = val[:dim, :]
                UpperCamelCase__ :Optional[Any] = val[dim : dim * 2, :]
UpperCamelCase__ :Dict = val[-dim:, :]
else:
UpperCamelCase__ :Tuple = val[:dim]
                UpperCamelCase__ :Tuple = val[dim : dim * 2]
UpperCamelCase__ :Dict = val[-dim:]
else:
UpperCamelCase__ :Dict = rename_key(lowercase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCamelCase__ :Union[str, Any] = val.T
UpperCamelCase__ :int = val
return orig_state_dict
def A ( lowercase__ : Dict ) -> Union[str, Any]:
if num_frames == 8:
UpperCamelCase__ :int = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
UpperCamelCase__ :int = """eating_spaghetti.npy"""
elif num_frames == 32:
UpperCamelCase__ :Any = """eating_spaghetti_32_frames.npy"""
UpperCamelCase__ :List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowercase__ , repo_type="""dataset""" , )
UpperCamelCase__ :Optional[Any] = np.load(lowercase__ )
return list(lowercase__ )
def A ( lowercase__ : int , lowercase__ : Dict=None , lowercase__ : List[str]=False ) -> int:
UpperCamelCase__ :Dict = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
UpperCamelCase__ :List[Any] = model_to_url[model_name]
UpperCamelCase__ :Optional[int] = 8
if "16-frames" in model_name:
UpperCamelCase__ :Union[str, Any] = 16
elif "shot" in model_name:
UpperCamelCase__ :Any = 32
UpperCamelCase__ :Dict = get_xclip_config(lowercase__ , lowercase__ )
UpperCamelCase__ :List[Any] = XCLIPModel(lowercase__ )
model.eval()
if "drive" in checkpoint_url:
UpperCamelCase__ :int = """pytorch_model.bin"""
gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ )
UpperCamelCase__ :str = torch.load(lowercase__ , map_location="""cpu""" )["""model"""]
else:
UpperCamelCase__ :Tuple = torch.hub.load_state_dict_from_url(lowercase__ )["""model"""]
UpperCamelCase__ :Optional[int] = convert_state_dict(lowercase__ , lowercase__ )
UpperCamelCase__ :Tuple = XCLIPModel(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :str = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCamelCase__ :Dict = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
UpperCamelCase__ :Union[str, Any] = VideoMAEImageProcessor(size=lowercase__ )
UpperCamelCase__ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCamelCase__ :Any = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
UpperCamelCase__ :Optional[Any] = XCLIPProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
UpperCamelCase__ :Union[str, Any] = prepare_video(lowercase__ )
UpperCamelCase__ :List[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowercase__ , return_tensors="""pt""" , padding=lowercase__ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
UpperCamelCase__ :str = model(**lowercase__ )
# Verify outputs
UpperCamelCase__ :Dict = outputs.logits_per_video
UpperCamelCase__ :Dict = logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowercase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase__ :Optional[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase__ :Optional[int] = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase__ :Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase__ :str = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase__ :Dict = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase__ :str = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase__ :List[str] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase__ :int = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase__ :str = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase__ :Any = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase__ :List[Any] = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase__ :Dict = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase__ :Any = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase__ :Optional[int] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase__ :List[str] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase__ :Dict = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase__ :int = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase__ :Optional[Any] = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowercase__ , organization="""nielsr""" )
processor.push_to_hub(lowercase__ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowercase__ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
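# --- Editor's note: the heart of `convert_state_dict` above is splitting each
# fused `attn.in_proj` matrix of shape (3*dim, dim) into separate q/k/v
# projections. A minimal sketch with a hypothetical key name (editor's
# illustration, not part of the conversion script):
import torch

def split_in_proj_sketch(state_dict: dict, key: str, dim: int) -> dict:
    val = state_dict.pop(key)  # fused qkv weight, rows ordered q, k, v
    prefix = key.replace("attn.in_proj_weight", "self_attn")
    state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
    state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
    state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
    return state_dict

sd = {"layers.0.attn.in_proj_weight": torch.randn(3 * 8, 8)}
print(sorted(split_in_proj_sketch(sd, "layers.0.attn.in_proj_weight", 8)))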
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
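# --- Editor's note: a readable, runnable version of the digit-reversal
# palindrome check above (editor's illustration):
def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num

assert is_palindrome_number(121) and not is_palindrome_number(123)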
| 45 | 1 |
from datetime import datetime
import requests
def A ( lowercase__ : str ) -> bytes:
UpperCamelCase__ :Any = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
UpperCamelCase__ :Optional[int] = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(lowercase__ ).content
if __name__ == "__main__":
UpperCamelCase = input("Enter Video/IGTV url: ").strip()
UpperCamelCase = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( lowercase__ : str , lowercase__ : complex , lowercase__ : str = "x" , lowercase__ : float = 10**-10 , lowercase__ : int = 1 , ) -> complex:
UpperCamelCase__ :Optional[int] = symbols(lowercase__ )
UpperCamelCase__ :Dict = lambdify(lowercase__ , lowercase__ )
UpperCamelCase__ :Any = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) )
UpperCamelCase__ :List[str] = starting_point
while True:
if diff_function(lowercase__ ) != 0:
UpperCamelCase__ :List[Any] = prev_guess - multiplicity * func(lowercase__ ) / diff_function(
lowercase__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase__ :Optional[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
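# --- Editor's note: the sympy routine above reduces to the classic update
# x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n). A plain-float sketch without
# symbolic differentiation (editor's illustration):
def newton_raphson_sketch(f, df, x0: float, precision: float = 1e-10) -> float:
    while True:
        if df(x0) == 0:
            raise ZeroDivisionError("Could not find root")
        x1 = x0 - f(x0) / df(x0)
        if abs(x1 - x0) < precision:
            return x1
        x0 = x1

print(newton_raphson_sketch(lambda x: x**2 - 2, lambda x: 2 * x, 1.5))  # ~1.4142135623730951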
| 45 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
        # reduce an oversized key into the byte range
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
        # reduce an oversized key into the byte range
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = ["""input_features""", """attention_mask"""]
def __init__( self :int , lowerCamelCase__ :Optional[int]=80 , lowerCamelCase__ :Any=1_60_00 , lowerCamelCase__ :Union[str, Any]=80 , lowerCamelCase__ :List[Any]=0.0 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :str=True , **lowerCamelCase__ :str , ):
super().__init__(feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = num_mel_bins
UpperCamelCase__ :Dict = do_ceptral_normalize
UpperCamelCase__ :Optional[Any] = normalize_means
UpperCamelCase__ :Optional[int] = normalize_vars
UpperCamelCase__ :List[Any] = True
def __a ( self :Any , lowerCamelCase__ :np.ndarray , ):
UpperCamelCase__ :Union[str, Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCamelCase__ :str = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 )
UpperCamelCase__ :Tuple = ta_kaldi.fbank(lowerCamelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __a ( lowerCamelCase__ :np.ndarray , lowerCamelCase__ :int , lowerCamelCase__ :Optional[bool] = True , lowerCamelCase__ :Optional[bool] = True , lowerCamelCase__ :float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
UpperCamelCase__ :Any = x[:input_length].mean(axis=0 )
UpperCamelCase__ :int = np.subtract(lowerCamelCase__ , lowerCamelCase__ )
if normalize_vars:
UpperCamelCase__ :str = x[:input_length].std(axis=0 )
UpperCamelCase__ :str = np.divide(lowerCamelCase__ , lowerCamelCase__ )
if input_length < x.shape[0]:
UpperCamelCase__ :List[str] = padding_value
# make sure array is in float32
UpperCamelCase__ :Optional[int] = x.astype(np.floataa )
return x
def __a ( self :Optional[Any] , lowerCamelCase__ :List[np.ndarray] , lowerCamelCase__ :Optional[np.ndarray] = None ):
UpperCamelCase__ :List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCamelCase__ , lowerCamelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCamelCase__ , lowerCamelCase__ )
]
def __call__( self :List[Any] , lowerCamelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :bool = False , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[Union[str, TensorType]] = None , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[bool] = None , **lowerCamelCase__ :Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCamelCase__ :Union[str, Any] = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase__ :Union[str, Any] = is_batched_numpy or (
isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase__ :List[str] = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ):
UpperCamelCase__ :str = np.asarray(lowerCamelCase__ , dtype=np.floataa )
elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase__ :Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase__ :List[Any] = [raw_speech]
# extract fbank features
UpperCamelCase__ :Dict = [self._extract_fbank_features(lowerCamelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase__ :Optional[Any] = BatchFeature({"""input_features""": features} )
UpperCamelCase__ :List[Any] = self.pad(
lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , )
# make sure list is in array format
UpperCamelCase__ :List[str] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , lowerCamelCase__ ):
UpperCamelCase__ :int = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase__ :Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCamelCase__ :List[str] = [np.asarray(lowerCamelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCamelCase__ :int = (
np.array(lowerCamelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase__ , max_length=lowerCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase__ :int = self.normalize(
padded_inputs["""input_features"""] , attention_mask=lowerCamelCase__ )
if return_tensors is not None:
UpperCamelCase__ :List[str] = padded_inputs.convert_to_tensors(lowerCamelCase__ )
return padded_inputs
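# --- Editor's note: `utterance_cmvn` above normalizes each utterance's fbank
# features with the mean and variance of its valid (unpadded) frames. A minimal
# numpy sketch; the epsilon guard is an editor addition, the original divides by
# the raw standard deviation:
import numpy as np

def cmvn_sketch(x: np.ndarray, input_length: int) -> np.ndarray:
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    return ((x - mean) / (std + 1e-10)).astype(np.float32)

feats = np.random.randn(100, 80).astype(np.float32)
print(cmvn_sketch(feats, 90).shape)  # (100, 80)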
| 45 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
        UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (a[left], a[pivot])  # swap the pivot into the left-most position
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
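# --- Editor's note: choosing the pivot uniformly at random, as above, keeps
# quicksort's expected running time O(n log n) even on already-sorted input. A
# compact out-of-place sketch (editor's illustration):
import random

def quicksort_sketch(a: list) -> list:
    if len(a) <= 1:
        return a
    pivot = a[random.randrange(len(a))]
    return (
        quicksort_sketch([x for x in a if x < pivot])
        + [x for x in a if x == pivot]
        + quicksort_sketch([x for x in a if x > pivot])
    )

assert quicksort_sketch([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]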
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : list[int] , lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Tuple = 0
UpperCamelCase__ :Any = len(lowercase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCamelCase__ :Union[str, Any] = i + 1
else:
UpperCamelCase__ :Optional[int] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
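# --- Editor's note: the two-pointer scan above is only correct when the input
# list is sorted ascending; on unsorted input it can walk past valid pairs. A
# self-contained sketch demonstrating both cases (editor's illustration):
def two_sum_sorted_sketch(nums: list, target: int) -> list:
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        i, j = (i + 1, j) if s < target else (i, j - 1)
    return []

assert two_sum_sorted_sketch([2, 7, 11, 15], 9) == [0, 1]
assert two_sum_sorted_sketch([11, 7, 2, 15], 9) == []  # unsorted input misses 7 + 2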
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
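# --- Editor's note: the hidden size computed above is the channel width after
# the final stage; each of the four default stages doubles the embedding
# dimension (editor's illustration):
embed_dim, num_stages = 64, 4
assert int(embed_dim * 2 ** (num_stages - 1)) == 512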
| 45 | 1 |
from __future__ import annotations
from collections.abc import Callable
def A ( lowercase__ : Callable[[int | float], int | float] , lowercase__ : int | float , lowercase__ : int | float , lowercase__ : int = 100 , ) -> float:
UpperCamelCase__ :List[Any] = x_start
UpperCamelCase__ :Any = fnc(lowercase__ )
UpperCamelCase__ :Optional[Any] = 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCamelCase__ :Dict = (x_end - x_start) / steps + xa
UpperCamelCase__ :Dict = fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCamelCase__ :Optional[Any] = xa
UpperCamelCase__ :Optional[Any] = fxa
return area
if __name__ == "__main__":
def A ( lowercase__ : Dict ) -> List[Any]:
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
UpperCamelCase = 10
while i <= 100_000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
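# --- Editor's note: each loop iteration above adds the trapezoid
# |f(x1) + f(x2)| * (x2 - x1) / 2; the absolute value makes this the unsigned
# area between the curve and the x axis. A fixed-step sketch (editor's
# illustration):
def trapezoidal_area_sketch(f, a: float, b: float, steps: int = 100_000) -> float:
    h = (b - a) / steps
    return sum(abs(f(a + i * h) + f(a + (i + 1) * h)) * h / 2 for i in range(steps))

print(trapezoidal_area_sketch(lambda x: x**3 + x**2, -5, 5))  # ~312.7 for this f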
| 45 |
def A ( lowercase__ : int , lowercase__ : int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
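# --- Editor's note: NOR is functionally complete, so the gate above suffices to
# build NOT, OR and AND. A minimal sketch (editor's illustration):
def nor_sketch(a: int, b: int) -> int:
    return int(a == b == 0)

def not_sketch(a: int) -> int:
    return nor_sketch(a, a)

def or_sketch(a: int, b: int) -> int:
    return not_sketch(nor_sketch(a, b))

def and_sketch(a: int, b: int) -> int:
    return nor_sketch(not_sketch(a), not_sketch(b))

assert (and_sketch(1, 1), or_sketch(0, 1), not_sketch(0)) == (1, 1, 1)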
| 45 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def A ( lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
UpperCamelCase__ :str = quote(lowercase__ )
return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type="""dataset""" , revision=lowercase__ )
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
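# --- Editor's note: the assertions above check that both output dimensions are
# multiples of `size_divisor`; GLPN achieves this by rounding each dimension
# down to the nearest multiple. A minimal sketch of that rounding (editor's
# illustration):
def round_to_divisor_sketch(height: int, width: int, size_divisor: int = 32) -> tuple:
    return (height // size_divisor * size_divisor, width // size_divisor * size_divisor)

assert round_to_divisor_sketch(400, 275) == (384, 256)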
| 45 | 1 |
import os
def A ( lowercase__ : str = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as input_file:
UpperCamelCase__ :Any = [
[int(lowercase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
UpperCamelCase__ :int = len(lowercase__ )
UpperCamelCase__ :Union[str, Any] = len(matrix[0] )
UpperCamelCase__ :Optional[int] = [[-1 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
for i in range(lowercase__ ):
UpperCamelCase__ :List[Any] = matrix[i][0]
for j in range(1 , lowercase__ ):
for i in range(lowercase__ ):
UpperCamelCase__ :List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowercase__ ):
UpperCamelCase__ :Optional[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCamelCase__ :Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
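# --- Editor's note: the solver above is the classic Project Euler 82 dynamic
# program: sweep columns left to right, then relax each column once moving down
# and once moving up. A clean sketch on a tiny matrix (editor's illustration):
def min_path_sum_sketch(matrix: list) -> int:
    best = [row[0] for row in matrix]
    for j in range(1, len(matrix[0])):
        best = [best[i] + matrix[i][j] for i in range(len(matrix))]
        for i in range(1, len(matrix)):  # relax downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(len(matrix) - 2, -1, -1):  # relax upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

assert min_path_sum_sketch([[131, 673, 234], [201, 96, 342], [630, 803, 746]]) == 639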
| 45 |
import math
def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> Optional[Any]:
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(lowercase__ )
else:
        if x == 0:  # 0 raised to any positive power is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 45 | 1 |
from string import ascii_uppercase
UpperCamelCase = {str(ord(c) - 55): c for c in ascii_uppercase}
def A ( lowercase__ : int , lowercase__ : int ) -> str:
if isinstance(lowercase__ , lowercase__ ):
raise TypeError("""int() can't convert non-string with explicit base""" )
if num < 0:
raise ValueError("""parameter must be positive int""" )
if isinstance(lowercase__ , lowercase__ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if isinstance(lowercase__ , lowercase__ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 36:
raise ValueError("""base must be <= 36""" )
UpperCamelCase__ :Optional[int] = """"""
UpperCamelCase__ :Union[str, Any] = 0
UpperCamelCase__ :Dict = 0
while div != 1:
UpperCamelCase__ , UpperCamelCase__ :Tuple = divmod(lowercase__ , lowercase__ )
if base >= 11 and 9 < mod < 36:
UpperCamelCase__ :Tuple = ALPHABET_VALUES[str(lowercase__ )]
else:
UpperCamelCase__ :Dict = str(lowercase__ )
new_value += actual_value
UpperCamelCase__ :Union[str, Any] = num // base
UpperCamelCase__ :Optional[int] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(lowercase__ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
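# --- Editor's note: the converter above is repeated divmod: each remainder is
# one digit (A-Z for values 10-35), collected least-significant first. A compact
# sketch (editor's illustration):
import string

def to_base_sketch(num: int, base: int) -> str:
    digits = string.digits + string.ascii_uppercase
    out = ""
    while num > 0:
        num, mod = divmod(num, base)
        out = digits[mod] + out
    return out or "0"

assert to_base_sketch(255, 16) == "FF"
assert int(to_base_sketch(1000, 36), 36) == 1000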
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
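# --- Editor's note: Flaubert-style models take per-sequence `lengths` instead
# of a padding mask and derive the mask internally. A minimal sketch of that
# equivalence (editor's illustration):
import tensorflow as tf

lengths = tf.constant([5, 7])
mask = tf.sequence_mask(lengths, maxlen=7, dtype=tf.float32)
print(mask.numpy())  # row 0 has five 1s then zeros, row 1 is all 1s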
| 45 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Any = UnCLIPImageVariationPipeline
_snake_case : int = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
_snake_case : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS
_snake_case : List[Any] = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
_snake_case : Optional[Any] = False
@property
def __a ( self :Optional[int] ):
return 32
@property
def __a ( self :Tuple ):
return 32
@property
def __a ( self :Tuple ):
return self.time_input_dim
@property
def __a ( self :Any ):
return self.time_input_dim * 4
@property
def __a ( self :Any ):
return 1_00
@property
def __a ( self :str ):
UpperCamelCase__ :Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __a ( self :List[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowerCamelCase__ )
@property
def __a ( self :Tuple ):
torch.manual_seed(0 )
UpperCamelCase__ :List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowerCamelCase__ )
@property
def __a ( self :int ):
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
UpperCamelCase__ :Optional[int] = UnCLIPTextProjModel(**lowerCamelCase__ )
return model
@property
def __a ( self :Any ):
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
UpperCamelCase__ :str = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __a ( self :List[str] ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __a ( self :Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __a ( self :Union[str, Any] ):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
UpperCamelCase__ :List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __a ( self :Any ):
UpperCamelCase__ :List[str] = self.dummy_decoder
UpperCamelCase__ :List[Any] = self.dummy_text_proj
UpperCamelCase__ :str = self.dummy_text_encoder
UpperCamelCase__ :Any = self.dummy_tokenizer
UpperCamelCase__ :str = self.dummy_super_res_first
UpperCamelCase__ :Union[str, Any] = self.dummy_super_res_last
UpperCamelCase__ :List[Any] = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , )
UpperCamelCase__ :Any = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , )
UpperCamelCase__ :Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
UpperCamelCase__ :int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __a ( self :str , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any]=0 , lowerCamelCase__ :Tuple=True ):
UpperCamelCase__ :Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("""mps""" ):
UpperCamelCase__ :List[Any] = torch.manual_seed(lowerCamelCase__ )
else:
UpperCamelCase__ :List[Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
if pil_image:
UpperCamelCase__ :Dict = input_image * 0.5 + 0.5
UpperCamelCase__ :str = input_image.clamp(0 , 1 )
UpperCamelCase__ :Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ :int = DiffusionPipeline.numpy_to_pil(lowerCamelCase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __a ( self :int ):
UpperCamelCase__ :List[str] = """cpu"""
UpperCamelCase__ :Tuple = self.get_dummy_components()
UpperCamelCase__ :List[str] = self.pipeline_class(**lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Tuple = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = pipe(**lowerCamelCase__ )
UpperCamelCase__ :List[Any] = output.images
UpperCamelCase__ :int = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :List[str] = pipe(
**lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
UpperCamelCase__ :int = image[0, -3:, -3:, -1]
UpperCamelCase__ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ :Optional[int] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = """cpu"""
UpperCamelCase__ :Dict = self.get_dummy_components()
UpperCamelCase__ :Tuple = self.pipeline_class(**lowerCamelCase__ )
UpperCamelCase__ :str = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = pipe(**lowerCamelCase__ )
UpperCamelCase__ :Dict = output.images
UpperCamelCase__ :Optional[int] = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :Tuple = pipe(
**lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
UpperCamelCase__ :List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ :Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Union[str, Any] = """cpu"""
UpperCamelCase__ :Optional[Any] = self.get_dummy_components()
UpperCamelCase__ :Union[str, Any] = self.pipeline_class(**lowerCamelCase__ )
UpperCamelCase__ :int = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :List[str] = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
UpperCamelCase__ :Dict = pipe(**lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = output.images
UpperCamelCase__ :Optional[int] = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
UpperCamelCase__ :Dict = pipe(
**lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
UpperCamelCase__ :Dict = image[0, -3:, -3:, -1]
UpperCamelCase__ :Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCamelCase__ :List[Any] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = torch.device("""cpu""" )
        class DummyScheduler :
            """simple docstring"""
            init_noise_sigma : int = 1
UpperCamelCase__ :Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ :List[str] = self.pipeline_class(**lowerCamelCase__ )
UpperCamelCase__ :Dict = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :int = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
UpperCamelCase__ :List[Any] = pipe.decoder.dtype
UpperCamelCase__ :str = 1
UpperCamelCase__ :List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCamelCase__ :int = pipe.prepare_latents(
lowerCamelCase__ , dtype=lowerCamelCase__ , device=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , scheduler=DummyScheduler() )
UpperCamelCase__ :int = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCamelCase__ :Optional[int] = pipe.prepare_latents(
lowerCamelCase__ , dtype=lowerCamelCase__ , device=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , scheduler=DummyScheduler() )
UpperCamelCase__ :Any = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
UpperCamelCase__ :int = pipe(
**lowerCamelCase__ , decoder_latents=lowerCamelCase__ , super_res_latents=lowerCamelCase__ ).images
UpperCamelCase__ :int = self.get_dummy_inputs(lowerCamelCase__ , pil_image=lowerCamelCase__ )
# Don't pass image, instead pass embedding
UpperCamelCase__ :List[Any] = pipeline_inputs.pop("""image""" )
UpperCamelCase__ :str = pipe.image_encoder(lowerCamelCase__ ).image_embeds
UpperCamelCase__ :List[Any] = pipe(
**lowerCamelCase__ , decoder_latents=lowerCamelCase__ , super_res_latents=lowerCamelCase__ , image_embeddings=lowerCamelCase__ , ).images
        # make sure passing image embeddings manually is identical to letting the
        # pipeline encode the image itself (compare the two outputs, not one output
        # against itself)
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = torch_device == """cpu"""
        # The check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
UpperCamelCase__ :Union[str, Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCamelCase__ , expected_max_diff=lowerCamelCase__ )
@skip_mps
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Tuple = torch_device == """cpu"""
UpperCamelCase__ :Optional[int] = True
UpperCamelCase__ :Union[str, Any] = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , additional_params_copy_to_batched_inputs=lowerCamelCase__ , )
def __a ( self :List[str] ):
UpperCamelCase__ :Any = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCamelCase__ :List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowerCamelCase__ , additional_params_copy_to_batched_inputs=lowerCamelCase__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowerCamelCase__ )
@skip_mps
def __a ( self :List[str] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __a ( self :Dict ):
return super().test_save_load_local()
@skip_mps
def __a ( self :Tuple ):
return super().test_save_load_optional_components()
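# The relaxed-tolerance attention-slicing test above compares a sliced and an unsliced
# forward pass. A hedged sketch of that comparison for any diffusers pipeline exposing
# `enable_attention_slicing` (`make_inputs` must rebuild its generator on each call so
# both runs start from the same noise):
def _attention_slicing_is_lossless(pipe, make_inputs, max_diff=1e-2) -> bool:
    pipe.disable_attention_slicing()
    baseline = pipe(**make_inputs()).images
    pipe.enable_attention_slicing(slice_size=1)
    sliced = pipe(**make_inputs()).images
    return float(np.abs(baseline - sliced).max()) < max_diff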
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :Tuple ):
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
UpperCamelCase__ :List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
UpperCamelCase__ :int = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
UpperCamelCase__ :Optional[int] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase__ :List[str] = pipeline(
lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""np""" , )
UpperCamelCase__ :Union[str, Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ , 15 )
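# Boiled down, the integration test above is this inference recipe (an illustrative
# sketch using only names already imported in this file; fp16 plus a CPU-seeded
# generator keeps results comparable across GPUs, and "cuda" stands in for whatever
# device the caller has available):
def run_karlo_image_variation(image):
    pipeline = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipeline(image, generator=generator, output_type="np").images[0]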
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
            UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
        UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
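# The `save_pretrained`/`from_pretrained` test above is a serialization round-trip
# check. The same idea as a compact helper (illustrative; assumes `call_kwargs` carries
# the prompt/image arguments for `dual_guided` and that outputs are numpy arrays):
def outputs_survive_roundtrip(pipe, call_kwargs, tol=1e-5) -> bool:
    before = pipe.dual_guided(generator=torch.manual_seed(0), output_type="numpy", **call_kwargs).images
    with tempfile.TemporaryDirectory() as tmpdirname:
        pipe.save_pretrained(tmpdirname)
        reloaded = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
    reloaded.to(pipe.device)
    after = reloaded.dual_guided(generator=torch.manual_seed(0), output_type="numpy", **call_kwargs).images
    return float(np.abs(before - after).sum()) < tol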
| 45 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def A ( lowercase__ : Accelerator , lowercase__ : int = 16 ) -> Dict:
UpperCamelCase__ :Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase__ :Any = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase__ :Union[str, Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect
UpperCamelCase__ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase__ :List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase__ :int = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase__ :List[Any] = 8
else:
UpperCamelCase__ :Any = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase__ :Tuple = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
UpperCamelCase__ :List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
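# What the `find_executable_batch_size` decorator used below does, in miniature
# (a simplified sketch of the semantics, not the real implementation in
# `accelerate.utils`): run the wrapped function with the starting batch size and,
# on a CUDA out-of-memory error, halve the batch size and retry until it fits.
def _find_executable_batch_size_sketch(function, starting_batch_size=128):
    batch_size = starting_batch_size
    while batch_size > 0:
        try:
            return function(batch_size)
        except RuntimeError as err:
            if "out of memory" not in str(err):
                raise
            batch_size //= 2
    raise RuntimeError("No executable batch size found, reached zero.")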
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def A ( lowercase__ : str , lowercase__ : Optional[int] ) -> Tuple:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
UpperCamelCase__ :List[str] = 2
# Initialize accelerator
UpperCamelCase__ :Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Tuple = config["""lr"""]
UpperCamelCase__ :List[str] = int(config["""num_epochs"""] )
UpperCamelCase__ :List[str] = int(config["""seed"""] )
UpperCamelCase__ :List[str] = int(config["""batch_size"""] )
UpperCamelCase__ :Any = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase__ )
def inner_training_loop(lowercase__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ :List[Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ :Any = AdamW(params=model.parameters() , lr=lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate scheduler
UpperCamelCase__ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase__ :Dict = model(**lowercase__ )
UpperCamelCase__ :Optional[int] = outputs.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :List[Any] = model(**lowercase__ )
UpperCamelCase__ :Optional[int] = outputs.logits.argmax(dim=-1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
UpperCamelCase__ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def A ( ) -> List[Any]:
UpperCamelCase__ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase__ :Any = parser.parse_args()
UpperCamelCase__ :List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
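# Side note on the metric pattern used in the training loop: `evaluate.load("glue", "mrpc")`
# accumulates batches via `add_batch` and is reduced once per epoch with `compute`, which
# for MRPC returns accuracy and F1. A tiny standalone smoke test (dummy tensors, no training):
def _metric_smoke_test():
    metric = evaluate.load("glue", "mrpc")
    metric.add_batch(predictions=torch.tensor([1, 0, 1]), references=torch.tensor([1, 1, 1]))
    return metric.compute()  # e.g. {"accuracy": 0.66..., "f1": 0.8}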
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
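# The swap loops in `prepare_config_and_inputs` above enforce the LayoutLMv3 invariant
# x0 <= x1 and y0 <= y1 for every box. The same fixup, vectorized (a sketch assuming
# boxes shaped (batch, seq_len, 4) laid out as [x0, y0, x1, y1]):
def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    lo = np.minimum(bbox[..., :2], bbox[..., 2:])  # per-box (min_x, min_y)
    hi = np.maximum(bbox[..., :2], bbox[..., 2:])  # per-box (max_x, max_y)
    return np.concatenate([lo, hi], axis=-1)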
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :Tuple = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
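# Restated as plain inference, the integration test above looks like this (a sketch;
# the class names follow this file's imports, while current transformers spells them
# LayoutLMv3ImageProcessor / TFLayoutLMv3Model):
def layoutlmv3_forward_sketch(image):
    processor = LayoutLMvaImageProcessor(apply_ocr=False)
    pixel_values = processor(images=image, return_tensors="tf").pixel_values
    input_ids = tf.constant([[1, 2]])
    bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
    model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
    outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
    return outputs.last_hidden_state  # (1, 199, 768): 2 text tokens + 196 patches + 1 CLS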
| 45 | 1 |
from __future__ import annotations
import time
import numpy as np
UpperCamelCase = [8, 5, 9, 7]
UpperCamelCase = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCamelCase = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :list[int] , lowerCamelCase__ :list[list[int]] , lowerCamelCase__ :list[list[int]] , ):
UpperCamelCase__ :Optional[Any] = claim_vector
UpperCamelCase__ :str = allocated_resources_table
UpperCamelCase__ :Optional[Any] = maximum_claim_table
def __a ( self :str ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __a ( self :Union[str, Any] ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __a ( self :List[Any] ):
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __a ( self :int ):
        return {self.__need().index(i ): i for i in self.__need()}
def __a ( self :Any , **lowerCamelCase__ :str ):
UpperCamelCase__ :Dict = self.__need()
UpperCamelCase__ :Any = self.__allocated_resources_table
UpperCamelCase__ :Tuple = self.__available_resources()
UpperCamelCase__ :List[str] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
UpperCamelCase__ :List[str] = False
for each_need in need_list:
UpperCamelCase__ :List[Any] = True
for index, need in enumerate(lowerCamelCase__ ):
if need > available_resources[index]:
UpperCamelCase__ :int = False
break
if execution:
UpperCamelCase__ :Union[str, Any] = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCamelCase__ :Optional[Any] = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(lowerCamelCase__ )
# update available/freed resources stack
UpperCamelCase__ :Optional[Any] = np.array(lowerCamelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(lowerCamelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def __a ( self :Dict ):
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(lowerCamelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(lowerCamelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(lowerCamelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(lowerCamelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
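# A worked sanity check on the tables above (illustrative): the need matrix is
# maximum_claim - allocation, and a process may run now only if its whole need row
# fits inside the currently available vector. With this data only P3 qualifies first.
def bankers_first_step_demo() -> None:
    claim = np.array([8, 5, 9, 7])
    allocated = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
    maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])
    need = maximum - allocated                 # outstanding demand per process
    available = claim - allocated.sum(axis=0)  # [1, 2, 2, 2] for this data
    for i, row in enumerate(need):
        print(f"P{i + 1} can run now: {bool((row <= available).all())}")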
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( examples : Union[str, Any] ) -> Dict:
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
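    # Illustrative shape check (not part of the original script): a PIL image run through
    # these transforms becomes a float tensor of shape (3, size, size), normalized with the
    # image processor's mean/std, which is what ViTMAEForPreTraining expects as pixel_values.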
def preprocess_images(lowercase__ : Tuple ):
        UpperCamelCase__ :List[Any] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
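    # Worked example of the scaling rule above (illustrative numbers): with
    # base_learning_rate=1.5e-4 and a total batch of 512 (e.g. 64 per device * 2 accumulation
    # steps * 4 processes), learning_rate = 1.5e-4 * 512 / 256 = 3e-4.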
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[Any] = """xglm"""
_snake_case : int = ["""past_key_values"""]
_snake_case : int = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Any , lowerCamelCase__ :int=25_60_08 , lowerCamelCase__ :str=20_48 , lowerCamelCase__ :Dict=10_24 , lowerCamelCase__ :Union[str, Any]=40_96 , lowerCamelCase__ :Tuple=24 , lowerCamelCase__ :Optional[int]=16 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :Dict=0.0 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=True , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :Optional[int]=1 , lowerCamelCase__ :Tuple=0 , lowerCamelCase__ :Dict=2 , **lowerCamelCase__ :int , ):
UpperCamelCase__ :str = vocab_size
UpperCamelCase__ :Optional[Any] = max_position_embeddings
UpperCamelCase__ :List[Any] = d_model
UpperCamelCase__ :int = ffn_dim
UpperCamelCase__ :List[str] = num_layers
UpperCamelCase__ :List[Any] = attention_heads
UpperCamelCase__ :List[Any] = activation_function
UpperCamelCase__ :Tuple = dropout
UpperCamelCase__ :str = attention_dropout
UpperCamelCase__ :Optional[Any] = activation_dropout
UpperCamelCase__ :List[Any] = layerdrop
UpperCamelCase__ :Optional[Any] = init_std
UpperCamelCase__ :Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase__ :str = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
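# Illustrative (not part of the original file): XGLMConfig() reproduces the facebook/xglm-564M
# geometry above (d_model=1024, num_layers=24, attention_heads=16), and the attribute_map lets
# callers read config.hidden_size / config.num_hidden_layers as aliases for d_model / num_layers.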
| 45 |
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
    UpperCamelCase__ :Union[str, Any] = [True] * lowercase__
UpperCamelCase__ :int = False
UpperCamelCase__ :Optional[Any] = False
UpperCamelCase__ :str = True
    for i in range(3 , int(lowercase__**0.5 + 1 ) , 2 ):
UpperCamelCase__ :List[Any] = i * 2
        while index < lowercase__:
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Tuple = index + i
UpperCamelCase__ :str = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
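# Quick sanity check (illustrative): prime_sieve(10) should return [2, 3, 5, 7].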
def A ( lowercase__ : int = 1_000_000 ) -> int:
UpperCamelCase__ :Any = prime_sieve(lowercase__ )
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Optional[Any] = 0
for i in range(len(lowercase__ ) ):
for j in range(i + length , len(lowercase__ ) ):
UpperCamelCase__ :Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
UpperCamelCase__ :Union[str, Any] = j - i
UpperCamelCase__ :Any = sol
return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = """data2vec-audio"""
def __init__( self :List[Any] , lowerCamelCase__ :List[str]=32 , lowerCamelCase__ :Optional[Any]=7_68 , lowerCamelCase__ :int=12 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :List[str]=30_72 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :List[str]=0.1 , lowerCamelCase__ :Dict=0.1 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :List[str]=1e-5 , lowerCamelCase__ :Union[str, Any]="gelu" , lowerCamelCase__ :Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ :int=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ :Dict=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :List[Any]=16 , lowerCamelCase__ :Union[str, Any]=19 , lowerCamelCase__ :int=5 , lowerCamelCase__ :Dict=0.05 , lowerCamelCase__ :Union[str, Any]=10 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :Dict=10 , lowerCamelCase__ :List[str]=0 , lowerCamelCase__ :Dict="sum" , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :int=False , lowerCamelCase__ :Dict=2_56 , lowerCamelCase__ :Any=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCamelCase__ :int=(5, 3, 3, 1, 1) , lowerCamelCase__ :Union[str, Any]=(1, 2, 3, 1, 1) , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=0 , lowerCamelCase__ :str=1 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :Optional[Any]=3 , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=None , **lowerCamelCase__ :Any , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
UpperCamelCase__ :int = hidden_size
UpperCamelCase__ :int = feat_extract_activation
UpperCamelCase__ :str = list(lowerCamelCase__ )
UpperCamelCase__ :Tuple = list(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = list(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = conv_bias
UpperCamelCase__ :Any = num_conv_pos_embeddings
UpperCamelCase__ :Optional[Any] = num_conv_pos_embedding_groups
UpperCamelCase__ :Optional[Any] = conv_pos_kernel_size
UpperCamelCase__ :int = len(self.conv_dim )
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :int = hidden_dropout
UpperCamelCase__ :Dict = attention_dropout
UpperCamelCase__ :Any = activation_dropout
UpperCamelCase__ :int = feat_proj_dropout
UpperCamelCase__ :List[Any] = final_dropout
UpperCamelCase__ :List[Any] = layerdrop
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :Any = vocab_size
UpperCamelCase__ :str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ :Tuple = mask_time_prob
UpperCamelCase__ :Union[str, Any] = mask_time_length
UpperCamelCase__ :Any = mask_time_min_masks
UpperCamelCase__ :Tuple = mask_feature_prob
UpperCamelCase__ :Optional[Any] = mask_feature_length
UpperCamelCase__ :Tuple = mask_feature_min_masks
# ctc loss
UpperCamelCase__ :List[str] = ctc_loss_reduction
UpperCamelCase__ :Optional[int] = ctc_zero_infinity
# adapter
UpperCamelCase__ :Optional[int] = add_adapter
UpperCamelCase__ :List[str] = adapter_kernel_size
UpperCamelCase__ :Dict = adapter_stride
UpperCamelCase__ :Optional[Any] = num_adapter_layers
UpperCamelCase__ :Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ :Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ :List[str] = list(lowerCamelCase__ )
UpperCamelCase__ :int = list(lowerCamelCase__ )
UpperCamelCase__ :List[str] = list(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = xvector_output_dim
@property
def __a ( self :Optional[Any] ):
return math.prod(self.conv_stride )
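# Illustrative: with the default conv_stride=(5, 2, 2, 2, 2, 2, 2), the property above returns
# 5 * 2**6 = 320, i.e. one encoder frame per 320 waveform samples (20 ms at 16 kHz audio).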
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def A ( lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :List[str] = [False] * len(lowercase__ )
UpperCamelCase__ :List[str] = [-1] * len(lowercase__ )
def dfs(lowercase__ : Optional[Any] , lowercase__ : List[str] ):
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :List[str] = c
for u in graph[v]:
if not visited[u]:
dfs(lowercase__ , 1 - c )
for i in range(len(lowercase__ ) ):
if not visited[i]:
dfs(lowercase__ , 0 )
for i in range(len(lowercase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
UpperCamelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A ( lowercase__ : dict ) -> tuple:
return (data["data"], data["target"])
def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier:
UpperCamelCase__ :Tuple = XGBClassifier()
classifier.fit(lowercase__ , lowercase__ )
return classifier
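# Minimal usage sketch (illustrative, mirrors main() below):
#   features, targets = data_handling(load_iris())
#   model = xgboost(features, targets)  # a fitted XGBClassifier
#   model.predict(features[:1])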
def A ( ) -> None:
UpperCamelCase__ :str = load_iris()
UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 )
UpperCamelCase__ :Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 1 |
import itertools
import math
def A ( lowercase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A ( ) -> Optional[int]:
UpperCamelCase__ :List[str] = 2
while True:
if is_prime(lowercase__ ):
yield num
num += 1
def A ( lowercase__ : int = 10_001 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) )
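# Sanity check (illustrative): the 6th prime is 13, so solution(6) should return 13.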
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
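# Illustrative (hypothetical numbers): a 1200-character "content" field that tokenizes to
# 300 input ids gives a characters-per-token ratio of 1200 / 300 = 4.0.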
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = """distilbert"""
_snake_case : Tuple = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self :Dict , lowerCamelCase__ :Tuple=3_05_22 , lowerCamelCase__ :List[str]=5_12 , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :Union[str, Any]=6 , lowerCamelCase__ :Optional[Any]=12 , lowerCamelCase__ :str=7_68 , lowerCamelCase__ :List[str]=4 * 7_68 , lowerCamelCase__ :Optional[Any]=0.1 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :List[Any]="gelu" , lowerCamelCase__ :Any=0.02 , lowerCamelCase__ :Optional[Any]=0.1 , lowerCamelCase__ :Dict=0.2 , lowerCamelCase__ :List[str]=0 , **lowerCamelCase__ :Any , ):
UpperCamelCase__ :Dict = vocab_size
UpperCamelCase__ :Optional[Any] = max_position_embeddings
UpperCamelCase__ :Any = sinusoidal_pos_embds
UpperCamelCase__ :Union[str, Any] = n_layers
UpperCamelCase__ :Any = n_heads
UpperCamelCase__ :List[Any] = dim
UpperCamelCase__ :Optional[Any] = hidden_dim
UpperCamelCase__ :List[Any] = dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[Any] = activation
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :Any = qa_dropout
UpperCamelCase__ :str = seq_classif_dropout
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ )
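# Illustrative (not part of the original file): DistilBertConfig() reproduces the
# distilbert-base geometry above: 6 layers, 12 heads, dim=768, hidden_dim = 4 * 768 = 3072.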
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
@property
def __a ( self :str ):
if self.task == "multiple-choice":
UpperCamelCase__ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCamelCase__ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
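# Illustrative: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5]; the recursion on
# overlapping 2/3 slices gives a runtime of O(n**(log 3 / log 1.5)), roughly O(n**2.71).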
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCamelCase__ :Optional[int] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCAmelCase_ ( unittest.TestCase , lowercase ):
"""simple docstring"""
def __a ( self :int ):
UpperCamelCase__ :Dict = load_tool("""text-to-speech""" )
self.tool.setup()
def __a ( self :Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = self.tool("""hey""" )
UpperCamelCase__ :Optional[int] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __a ( self :int ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = self.tool("""hey""" )
UpperCamelCase__ :Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo.
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + """\n""".join(incompatible_ops ) )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
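# Illustrative invocation (paths and script name are hypothetical):
#   python <this_script>.py --saved_model_path ./model/saved_model.pb --opset 12 --strict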
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 1 |
def A ( lowercase__ : float , lowercase__ : list[float] ) -> float:
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
UpperCamelCase__ :Union[str, Any] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowercase__ ) )
return round(lowercase__ , ndigits=2 )
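# Worked example (illustrative): discount_rate=0.1 with cash_flows=[-100, 60, 60] gives
# -100/1.1**0 + 60/1.1**1 + 60/1.1**2 = -100 + 54.545 + 49.587 = 4.13 after rounding to 2 digits.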
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[lowercase__]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
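# Illustrative: shifting "dro mkd" back by 10 gives "the cat"; on reasonably long English
# ciphertexts the minimum chi-squared shift found above is expected to recover the plaintext.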
| 45 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
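# Illustrative call pattern (argument names follow the upstream value-guided pipeline and are
# assumptions here): pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)
# returns a single denormalized action for the current observation.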
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
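# Illustrative migration (checkpoint name is an example only): replace
#   MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")
# with
#   MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")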
| 45 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = "▁"
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = BigBirdTokenizer
_snake_case : List[Any] = BigBirdTokenizerFast
_snake_case : Any = True
_snake_case : Optional[int] = True
def __a ( self :Union[str, Any] ):
super().setUp()
UpperCamelCase__ :List[Any] = self.tokenizer_class(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self :str ):
UpperCamelCase__ :List[str] = """<s>"""
UpperCamelCase__ :str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(lowerCamelCase__ ) , 10_04 )
def __a ( self :Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __a ( self :Optional[Any] ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :str = self.get_rust_tokenizer()
UpperCamelCase__ :List[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase__ :List[str] = tokenizer.tokenize(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = self.get_rust_tokenizer()
UpperCamelCase__ :Any = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ :Tuple = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Dict = BigBirdTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
UpperCamelCase__ :str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCamelCase__ :List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCamelCase__ :Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCamelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __a ( self :Dict ):
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def __a ( self :List[str] ):
UpperCamelCase__ :Dict = """Hello World!"""
UpperCamelCase__ :Any = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCamelCase__ :Any = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def __a ( self :str ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCamelCase__ :Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCamelCase__ :Optional[Any] = """ """.join(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = BigBirdConfig(attention_type="""original_full""" )
UpperCamelCase__ :List[str] = BigBirdModel(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def __a ( self :List[str] ):
UpperCamelCase__ :Any = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
UpperCamelCase__ :Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def __a ( self :Union[str, Any] ):
# fmt: off
UpperCamelCase__ :int = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
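# Hedged sketch of the registration flow the tests above exercise (assumes the
# public transformers auto-API; CustomConfig / CustomFeatureExtractor are the
# fixture classes imported at the top of this file, and saved_dir is a
# hypothetical directory produced by save_pretrained):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     extractor = AutoFeatureExtractor.from_pretrained(saved_dir)
#
# The try/finally blocks above then pop the registrations back out of
# CONFIG_MAPPING / FEATURE_EXTRACTOR_MAPPING so one test cannot leak into the next.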
| 45 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
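# The assertions above verify that GLPN produces spatial dims that are exact
# multiples of size_divisor. A minimal sketch of that rounding rule, assuming
# a floor-to-multiple policy (the real processor's resize details may differ):
def _round_down_to_multiple(length: int, divisor: int) -> int:
    # e.g. 37 with divisor 32 floors to 32, which divides evenly
    return (length // divisor) * divisor

assert _round_down_to_multiple(37, 32) == 32
assert _round_down_to_multiple(37, 32) % 32 == 0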
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
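# Note: normalize and de_normalize above are exact inverses per key,
# ((x - mean) / std) * std + mean == x, so the planner can work in
# normalized space and decode trajectories back to environment units.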
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (so the trajectories start at the current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
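# A minimal, self-contained sketch of the value-guidance step used in
# run_diffusion above: ascend the gradient of a value function to nudge a
# sample toward higher predicted value. The quadratic toy value function and
# the scale / step count are illustrative assumptions, not the pipeline's.
def _toy_value_guidance(x, scale=0.1, n_steps=5):
    for _ in range(n_steps):
        x = x.detach().requires_grad_()
        value = -(x - 2.0).pow(2).sum()          # toy value function, maximal at x == 2
        (grad,) = torch.autograd.grad(value, [x])
        x = (x + scale * grad).detach()          # gradient ascent, as in the guided loop
    return x

# x drifts toward 2.0, mirroring how the pipeline pushes trajectories toward
# states the value function scores highly:
#     _toy_value_guidance(torch.zeros(3))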
| 45 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase = 16
UpperCamelCase = 32
def A ( lowercase__ : Union[str, Any] ) -> int:
return int(x / 2**20 )
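# Converts a byte count to whole mebibytes, e.g. 2**20 bytes -> 1 MiB.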
class lowerCAmelCase_ :
"""simple docstring"""
def __enter__( self :Tuple ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
UpperCamelCase__ :Optional[Any] = torch.cuda.memory_allocated()
return self
def __exit__( self :Optional[int] , *lowerCamelCase__ :Dict ):
gc.collect()
torch.cuda.empty_cache()
UpperCamelCase__ :List[Any] = torch.cuda.memory_allocated()
UpperCamelCase__ :List[Any] = torch.cuda.max_memory_allocated()
UpperCamelCase__ :Any = bamb(self.end - self.begin )
UpperCamelCase__ :Union[str, Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def A ( lowercase__ : Accelerator , lowercase__ : int = 16 , lowercase__ : str = "bert-base-cased" , lowercase__ : int = 320 , lowercase__ : int = 160 , ) -> Union[str, Any]:
UpperCamelCase__ :Tuple = AutoTokenizer.from_pretrained(lowercase__ )
UpperCamelCase__ :Optional[Any] = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": f"""train[:{n_train}]""", """validation""": f"""validation[:{n_val}]"""} )
def tokenize_function(lowercase__ : Any ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :Any = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :Optional[int] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCamelCase__ :Any = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
UpperCamelCase__ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def A ( lowercase__ : List[str] , lowercase__ : Dict ) -> int:
# Initialize accelerator
UpperCamelCase__ :Dict = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Dict = config["""lr"""]
UpperCamelCase__ :Optional[int] = int(config["""num_epochs"""] )
UpperCamelCase__ :Union[str, Any] = int(config["""seed"""] )
UpperCamelCase__ :int = int(config["""batch_size"""] )
UpperCamelCase__ :str = args.model_name_or_path
set_seed(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = get_dataloaders(lowercase__ , lowercase__ , lowercase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
UpperCamelCase__ :str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCamelCase__ :int = 1
UpperCamelCase__ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
UpperCamelCase__ :Dict = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ :str = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ :List[Any] = 0
# Now we train the model
UpperCamelCase__ :Optional[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowercase__ ):
UpperCamelCase__ :List[Any] = model(**lowercase__ )
UpperCamelCase__ :Optional[Any] = outputs.loss
UpperCamelCase__ :str = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
UpperCamelCase__ :Dict = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[str] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowercase__ , default=lowercase__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowercase__ , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowercase__ , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=1 , help="""Number of train epochs.""" , )
UpperCamelCase__ :List[str] = parser.parse_args()
UpperCamelCase__ :List[str] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
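# A worked restatement of the digit-reversal check above with descriptive
# names: 121 reads the same both ways, 123 does not.
def _is_number_palindrome(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num

assert _is_number_palindrome(121) and not _is_number_palindrome(123)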
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
Lambda(lambda lowercase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowercase__ : Tuple ):
UpperCamelCase__ :List[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
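# Why the one-liner above works: a set keeps a single copy of each value, so
# the two lengths match exactly when every element is unique.
assert len(set([1, 2, 3])) == len([1, 2, 3])   # all unique -> equal lengths
assert len(set([1, 2, 2])) != len([1, 2, 2])   # the duplicate collapses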
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case : bool = field(default=lowercase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
_snake_case : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def A ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
UpperCamelCase__ :Optional[Any] = import_module("""tasks""" )
try:
UpperCamelCase__ :Tuple = getattr(lowercase__ , model_args.task_type )
UpperCamelCase__ :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
UpperCamelCase__ :Dict = token_classification_task.get_labels(data_args.labels )
UpperCamelCase__ :Dict[int, str] = dict(enumerate(lowercase__ ) )
UpperCamelCase__ :Any = len(lowercase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , idalabel=lowercase__ , labelaid={label: i for i, label in enumerate(lowercase__ )} , cache_dir=model_args.cache_dir , )
UpperCamelCase__ :Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCamelCase__ :List[str] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase__ :List[str] = (
TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase__ :Tuple = (
TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> Tuple[List[int], List[int]]:
UpperCamelCase__ :List[Any] = np.argmax(lowercase__ , axis=2 )
UpperCamelCase__ , UpperCamelCase__ :int = preds.shape
UpperCamelCase__ :Optional[Any] = [[] for _ in range(lowercase__ )]
UpperCamelCase__ :Optional[int] = [[] for _ in range(lowercase__ )]
for i in range(lowercase__ ):
for j in range(lowercase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
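# Shape note for align_predictions: p.predictions holds (batch, seq_len,
# num_labels) logits, argmax over axis 2 yields per-token label ids, and
# positions whose gold id equals nn.CrossEntropyLoss().ignore_index (-100)
# are skipped so padding/special tokens never reach the seqeval metrics.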
def compute_metrics(lowercase__ : EvalPrediction ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ :str = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowercase__ , lowercase__ ),
"precision": precision_score(lowercase__ , lowercase__ ),
"recall": recall_score(lowercase__ , lowercase__ ),
"f1": fa_score(lowercase__ , lowercase__ ),
}
# Data collator
UpperCamelCase__ :int = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCamelCase__ :Dict = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ :Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase__ :List[str] = trainer.evaluate()
UpperCamelCase__ :str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowercase__ , lowercase__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowercase__ )
# Predict
if training_args.do_predict:
UpperCamelCase__ :Dict = TokenClassificationDataset(
token_classification_task=lowercase__ , data_dir=data_args.data_dir , tokenizer=lowercase__ , labels=lowercase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = trainer.predict(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = align_predictions(lowercase__ , lowercase__ )
UpperCamelCase__ :Dict = os.path.join(training_args.output_dir , """test_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" , lowercase__ , lowercase__ )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
UpperCamelCase__ :int = os.path.join(training_args.output_dir , """test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
token_classification_task.write_predictions_to_file(lowercase__ , lowercase__ , lowercase__ )
return results
def A ( lowercase__ : int ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (
a[left],
a[pivot],
) # move the randomly chosen pivot to the left-most bound
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
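# A self-contained restatement of the random-pivot quicksort above with
# descriptive names, runnable as a quick sanity check of the partition logic:
def _partition(a: list, lo: int, hi: int) -> int:
    pivot, i = a[lo], lo + 1
    for j in range(lo + 1, hi):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[lo] = a[lo], a[i - 1]   # drop the pivot into its final slot
    return i - 1

def _quick_sort_random(a: list, lo: int, hi: int) -> None:
    if lo < hi:
        p = random.randint(lo, hi - 1)   # random pivot, as above
        a[lo], a[p] = a[p], a[lo]
        mid = _partition(a, lo, hi)
        _quick_sort_random(a, lo, mid)
        _quick_sort_random(a, mid + 1, hi)

_data = [3, 1, 4, 1, 5]
_quick_sort_random(_data, 0, len(_data))
assert _data == [1, 1, 3, 4, 5]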
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the English language (how often each appears)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
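# --- Editorial sketch: structural check of the decryptor above (kept under its
# obfuscated name `A`), which returns (shift, chi_squared_value, decoded_text).
# The ciphertext is "the quick brown fox" shifted by 10.
def _demo_chi_squared_decrypt() -> None:
    shift, chi_squared_value, decoded = A("""dro aesmu lbygx pyh""" )
    assert isinstance(shift , int )
    assert isinstance(decoded , str )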
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
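# --- Editorial sketch: the derived channel dimension after the last stage is
# hidden_size = embed_dim * 2 ** (num_stages - 1); with the defaults above
# (embed_dim=64, depths=[3, 4, 6, 5]) that gives 64 * 2**3 = 512.
def _demo_dinat_hidden_size(embed_dim: int = 64 , num_stages: int = 4 ) -> int:
    return int(embed_dim * 2 ** (num_stages - 1) )
assert _demo_dinat_hidden_size() == 512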
| 45 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"nielsr/canine-s": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe0_00
SEP = 0xe0_01
BOS = 0xe0_02
MASK = 0xe0_03
RESERVED = 0xe0_04
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self :Tuple , lowerCamelCase__ :Optional[int]=chr(CLS ) , lowerCamelCase__ :Optional[Any]=chr(SEP ) , lowerCamelCase__ :Optional[Any]=chr(SEP ) , lowerCamelCase__ :Dict=chr(CLS ) , lowerCamelCase__ :List[Any]=chr(PAD ) , lowerCamelCase__ :Dict=chr(MASK ) , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :int=20_48 , **lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
UpperCamelCase__ :List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
UpperCamelCase__ :Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
UpperCamelCase__ :Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ :str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , model_max_length=lowerCamelCase__ , **lowerCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
UpperCamelCase__ :Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
UpperCamelCase__ :str = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
UpperCamelCase__ :Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
UpperCamelCase__ :Any = UNICODE_VOCAB_SIZE
UpperCamelCase__ :List[str] = len(self._special_codepoints )
@property
def __a ( self :Any ):
return self._unicode_vocab_size
def __a ( self :Optional[int] , lowerCamelCase__ :str ):
return list(lowerCamelCase__ )
def __a ( self :List[str] , lowerCamelCase__ :str ):
try:
return ord(lowerCamelCase__ )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def __a ( self :Any , lowerCamelCase__ :int ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowerCamelCase__ )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] ):
return "".join(lowerCamelCase__ )
def __a ( self :Optional[int] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :Union[str, Any] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
UpperCamelCase__ :Dict = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __a ( self :Tuple , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None , lowerCamelCase__ :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Tuple = [1] + ([0] * len(lowerCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(lowerCamelCase__ )) + [1]
return result
def __a ( self :int , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :List[Any] = [self.sep_token_id]
UpperCamelCase__ :Optional[Any] = [self.cls_token_id]
UpperCamelCase__ :Tuple = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
return ()
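# --- Editorial sketch: CANINE's vocabulary is raw Unicode, so tokenization is
# just ord()/chr(); a self-contained round-trip check of that idea.
def _demo_canine_roundtrip(text: str = """hello""" ) -> None:
    ids = [ord(char ) for char in text]  # tokenize to codepoints
    assert """""".join(chr(i ) for i in ids ) == text  # detokenize round-trip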
| 45 |
def nor_gate ( input_a : int , input_b : int ) -> int:
    return int(input_a == input_b == 0 )
def main ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
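# --- Editorial sketch: exhaustive check of the two-input truth table printed
# above; NOR is 1 only when both inputs are 0.
def _demo_nor_truth_table() -> None:
    assert [nor_gate(a , b ) for a in (0, 1) for b in (0, 1)] == [1, 0, 0, 0]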
| 45 | 1 |
from __future__ import annotations
import numpy as np
def A ( table : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
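# --- Editorial sketch: a Doolittle factorisation as returned by A above should
# satisfy lower @ upper == table with a unit diagonal on lower.
def _demo_lu_reconstruction() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]] )
    lower , upper = A(matrix )
    assert np.allclose(lower @ upper , matrix )
    assert np.allclose(np.diag(lower ) , 1.0 )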
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
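# --- Editorial sketch: the assertions above check that GLPN resizes images so
# both spatial dimensions are multiples of size_divisor (32 by default); the
# rounding rule is a simple floor to the nearest multiple.
def _demo_round_to_divisor(size: int , divisor: int = 32 ) -> int:
    return (size // divisor) * divisor
assert _demo_round_to_divisor(4_00 ) == 3_84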
| 45 | 1 |
import math
def res ( x , y ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
else:
print("Both are equal")
| 45 |
import math
def res ( x , y ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
else:
print("Both are equal")
| 45 | 1 |
from math import isqrt
def is_prime ( number : int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution ( max_prime : int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
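# --- Editorial sketch: the candidates enumerated above are differences of
# consecutive cubes, (n + 1)**3 - n**3 = 3n^2 + 3n + 1 = 7, 19, 37, 61, ...
def _demo_cube_gaps(count: int = 4 ) -> list[int]:
    return [(n + 1) ** 3 - n**3 for n in range(1 , count + 1 )]
assert _demo_cube_gaps() == [7, 19, 37, 61]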
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
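# --- Editorial sketch: the multiple-choice model above expands each example
# across num_choices via expand_dims + tile, (batch, seq) -> (batch, choices,
# seq); a self-contained NumPy analogue of that reshaping.
import numpy as np
def _demo_tile_for_choices(input_ids , num_choices: int ):
    return np.tile(np.expand_dims(input_ids , 1 ) , (1, num_choices, 1) )
assert _demo_tile_for_choices(np.zeros((13, 7) , dtype=int ) , 4 ).shape == (13, 4, 7)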
| 45 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[Any] = """deit"""
def __init__( self :Optional[Any] , lowerCamelCase__ :List[str]=7_68 , lowerCamelCase__ :int=12 , lowerCamelCase__ :Any=12 , lowerCamelCase__ :Optional[Any]=30_72 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :str=0.0 , lowerCamelCase__ :Tuple=0.0 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :Tuple=1e-12 , lowerCamelCase__ :Union[str, Any]=2_24 , lowerCamelCase__ :List[Any]=16 , lowerCamelCase__ :List[Any]=3 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :str=16 , **lowerCamelCase__ :str , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :int = hidden_size
UpperCamelCase__ :Dict = num_hidden_layers
UpperCamelCase__ :Optional[int] = num_attention_heads
UpperCamelCase__ :Optional[Any] = intermediate_size
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :Dict = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :str = initializer_range
UpperCamelCase__ :int = layer_norm_eps
UpperCamelCase__ :Any = image_size
UpperCamelCase__ :int = patch_size
UpperCamelCase__ :List[str] = num_channels
UpperCamelCase__ :Any = qkv_bias
UpperCamelCase__ :Tuple = encoder_stride
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : int = version.parse("""1.11""" )
@property
def __a ( self :List[str] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __a ( self :Union[str, Any] ):
return 1e-4
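# --- Editorial sketch: with the defaults above (image_size=224, patch_size=16)
# a DeiT-style encoder sees (224 // 16) ** 2 = 196 patches per image.
def _demo_num_patches(image_size: int = 2_24 , patch_size: int = 16 ) -> int:
    return (image_size // patch_size) ** 2
assert _demo_num_patches() == 1_96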
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
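# --- Editorial sketch: the slice assertions above compare a 3x3 corner of the
# generated image against hard-coded reference pixels within an absolute
# tolerance; the same check in plain NumPy.
import numpy as np
def _demo_slice_close(image_slice , expected_slice , atol: float = 1e-1 ) -> bool:
    return bool(np.abs(np.asarray(image_slice ).flatten() - np.asarray(expected_slice ).flatten() ).max() < atol )
assert _demo_slice_close([0.1, 0.2] , [0.15, 0.25] )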
| 45 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :str , lowerCamelCase__ :List[str]=False , lowerCamelCase__ :Optional[int]=None , lowerCamelCase__ :int=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[Any]=True , ):
UpperCamelCase__ :Tuple = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=lowerCamelCase__ , num_train_epochs=1 , distributed=lowerCamelCase__ , extra_args_str=lowerCamelCase__ , predict_with_generate=lowerCamelCase__ , do_train=lowerCamelCase__ , do_eval=lowerCamelCase__ , do_predict=lowerCamelCase__ , )
UpperCamelCase__ :str = TrainerState.load_from_json(os.path.join(lowerCamelCase__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
UpperCamelCase__ :int = [log for log in logs if """eval_loss""" in log.keys()]
UpperCamelCase__ :List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCamelCase__ :List[Any] = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __a ( self :Optional[Any] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __a ( self :Any ):
self.run_seqaseq_quick(distributed=lowerCamelCase__ )
@require_torch_multi_gpu
def __a ( self :int ):
self.run_seqaseq_quick(distributed=lowerCamelCase__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Dict ):
self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __a ( self :Union[str, Any] ):
self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __a ( self :List[str] ):
self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCamelCase__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __a ( self :str ):
self.run_seqaseq_quick(
distributed=lowerCamelCase__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCamelCase__ )
@require_apex
@require_torch_gpu
def __a ( self :str ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowerCamelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def __a ( self :List[str] , lowerCamelCase__ :Union[str, Any] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCamelCase__ :Optional[int] = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
UpperCamelCase__ :Union[str, Any] = experiments[experiment_id]
UpperCamelCase__ :int = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
UpperCamelCase__ :Any = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowerCamelCase__ , extra_args_str=data["""extra_args_str"""] )
UpperCamelCase__ :Any = len(re.findall(lowerCamelCase__ , cl.err ) )
self.assertEqual(lowerCamelCase__ , data["""n_matches"""] )
@slow
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=lowerCamelCase__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=lowerCamelCase__ , )
# Check metrics
UpperCamelCase__ :Tuple = TrainerState.load_from_json(os.path.join(lowerCamelCase__ , """trainer_state.json""" ) ).log_history
UpperCamelCase__ :int = [log for log in logs if """eval_loss""" in log.keys()]
UpperCamelCase__ :Tuple = eval_metrics[0]
UpperCamelCase__ :List[Any] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase__ )
# test if do_predict saves generations and metrics
UpperCamelCase__ :int = os.listdir(lowerCamelCase__ )
UpperCamelCase__ :Dict = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __a ( self :Union[str, Any] ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowerCamelCase__ :str ) -> Tuple[int, float]:
UpperCamelCase__ :List[str] = """--skip_memory_metrics 0"""
UpperCamelCase__ :Tuple = self.run_trainer(
max_len=1_28 , model_name=lowerCamelCase__ , learning_rate=3e-4 , num_train_epochs=1 , optim=lowerCamelCase__ , distributed=lowerCamelCase__ , extra_args_str=lowerCamelCase__ , do_eval=lowerCamelCase__ , do_predict=lowerCamelCase__ , n_gpus_to_use=1 , )
# Check metrics
UpperCamelCase__ :List[Any] = TrainerState.load_from_json(Path(lowerCamelCase__ , """trainer_state.json""" ) ).log_history
UpperCamelCase__ :List[Any] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
UpperCamelCase__ :Tuple = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
UpperCamelCase__ :Tuple = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCamelCase__ :Optional[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCamelCase__ :List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCamelCase__ :str = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCamelCase__ :Optional[Any] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCamelCase__ :Union[str, Any] = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowerCamelCase__ , lowerCamelCase__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
lowerCamelCase__ , lowerCamelCase__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
lowerCamelCase__ , lowerCamelCase__ , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def __a ( self :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :float = 3e-3 , lowerCamelCase__ :str = "adafactor" , lowerCamelCase__ :bool = False , lowerCamelCase__ :str = None , lowerCamelCase__ :int = 0 , lowerCamelCase__ :bool = True , lowerCamelCase__ :bool = True , lowerCamelCase__ :bool = True , lowerCamelCase__ :bool = True , lowerCamelCase__ :int = None , ):
UpperCamelCase__ :List[Any] = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
UpperCamelCase__ :int = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :List[Any] = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(lowerCamelCase__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(lowerCamelCase__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
UpperCamelCase__ :Optional[Any] = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(lowerCamelCase__ )}
""".split()
UpperCamelCase__ :Optional[Any] = """
--do_predict
""".split()
UpperCamelCase__ :Union[str, Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCamelCase__ :List[Any] = get_gpu_count()
UpperCamelCase__ :int = get_torch_dist_unique_port()
UpperCamelCase__ :Tuple = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
UpperCamelCase__ :Dict = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
else:
UpperCamelCase__ :Dict = ["""run_translation.py"""] + args
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
main()
return output_dir
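# --- Editorial sketch of the memory arithmetic in the bnb test above: Adam
# keeps ~8 bytes of optimizer state per fp32 parameter versus ~2 bytes for the
# 8-bit variant, so ~25M quantizable parameters save roughly 150MB.
def _demo_optimizer_state_mb(params_millions: float , bytes_per_param: int ) -> float:
    return params_millions * bytes_per_param  # millions of params * bytes ≈ MB
assert _demo_optimizer_state_mb(25 , 8 ) - _demo_optimizer_state_mb(25 , 2 ) == 150.0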
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(lowerCamelCase__ ):
                UpperCamelCase__ :Tuple = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img( ):
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
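# A quick arithmetic sketch (not part of the test suite) of where the expected
# shape (1, 199, 768) asserted above comes from. The numbers are assumptions
# based on the public layoutlmv3-base config: a 224x224 input image, 16x16
# patches, one visual CLS token, plus the two text tokens fed to the model.
def _expected_layoutlmv3_seq_length(text_len=2 , image_size=224 , patch_size=16 ):
    num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 visual tokens
    return text_len + 1 + num_patches  # text tokens + visual CLS + patch tokens
assert _expected_layoutlmv3_seq_length() == 199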
| 45 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
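# A smoke-test sketch for the shim above (assumptions: the class is the
# deprecated MobileViTFeatureExtractor and, as in the released module, its
# placeholder base resolves to MobileViTImageProcessor so a no-argument
# construction succeeds):
with warnings.catch_warnings(record=True ) as caught:
    warnings.simplefilter("""always""" )
    deprecated_extractor = lowerCAmelCase_()  # drop-in replacement for MobileViTImageProcessor
assert any(issubclass(w.category , FutureWarning ) for w in caught )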
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn(examples ) -> Dict:
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def main( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
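# Example invocation (illustrative only: the script name and hyperparameter
# values are placeholders, but every flag maps to a field declared in the
# dataclasses above or in the underlying TrainingArguments):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss \
#       --overwrite_output_dir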
| 45 | 1 |
from math import sqrt
def solution(limit : int = 100_0000 ) -> int:
    num_cuboids :int = 0
    max_cuboid_size :int = 0
    sum_shortest_sides :int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
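# A brute-force cross-check sketch for small sizes (assumption: the function
# above implements Project Euler 86, cumulatively counting cuboids with sides
# a <= b <= c <= M whose shortest surface path sqrt((a + b)**2 + c**2) is an
# integer; the problem statement quotes 2060 such cuboids for M = 100).
def count_integer_path_cuboids(max_size : int ) -> int:
    count = 0
    for c in range(1 , max_size + 1 ):
        for b in range(1 , c + 1 ):
            for a in range(1 , b + 1 ):
                if sqrt((a + b) ** 2 + c ** 2 ).is_integer():
                    count += 1
    return count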
| 45 |
from __future__ import annotations
def prime_sieve(limit : int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling : int = 100_0000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
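# Sanity checks, with expected values taken from the Project Euler 50 problem
# statement (treat them as assumptions about this implementation): below 100
# the answer is 41 = 2 + 3 + 5 + 7 + 11 + 13, and below one million it is
# 997651. Checking the latter is slow with this quadratic scan, so it is left
# as a comment.
assert solution(100 ) == 41
# assert solution(100_0000 ) == 997651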
| 45 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=13 , lowerCamelCase__ :List[str]=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Tuple=True , lowerCamelCase__ :Any=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :Union[str, Any]=5 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :List[str]=37 , lowerCamelCase__ :Any="gelu" , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :Optional[Any]=0.1 , lowerCamelCase__ :str=50 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :List[str]=None , ):
UpperCamelCase__ :Dict = parent
UpperCamelCase__ :Optional[Any] = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :Any = vocab_size
UpperCamelCase__ :str = hidden_size
UpperCamelCase__ :int = num_hidden_layers
UpperCamelCase__ :Dict = num_attention_heads
UpperCamelCase__ :Union[str, Any] = intermediate_size
UpperCamelCase__ :Optional[int] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :List[Any] = attention_probs_dropout_prob
UpperCamelCase__ :List[Any] = max_position_embeddings
UpperCamelCase__ :Optional[int] = initializer_range
UpperCamelCase__ :str = use_labels
UpperCamelCase__ :Union[str, Any] = scope
def __a ( self :Optional[Any] ):
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase__ :str = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Optional[int] ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.prepare_config_and_inputs()
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __a ( self :Dict , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :List[Any] , **lowerCamelCase__ :str , ):
UpperCamelCase__ :Dict = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] , lowerCamelCase__ :str , **lowerCamelCase__ :Union[str, Any] , ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :List[str] = BertGenerationEncoder(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
UpperCamelCase__ :Any = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :Any , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , **lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :int = True
UpperCamelCase__ :List[str] = BertGenerationDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
# first forward pass
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ , )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Dict = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
UpperCamelCase__ :Optional[int] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :int = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Tuple , *lowerCamelCase__ :str , ):
UpperCamelCase__ :List[Any] = BertGenerationDecoder(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Optional[Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def __a ( self :List[Any] ):
UpperCamelCase__ :int = BertGenerationEncoderTester(self )
UpperCamelCase__ :Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ :Optional[Any] = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase__ )
def __a ( self :Dict ):
# This regression test was failing with PyTorch < 1.3
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
def __a ( self :str ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :Tuple ):
UpperCamelCase__ :Any = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase__ :List[str] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
UpperCamelCase__ :str = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , lowerCamelCase__ )
UpperCamelCase__ :Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :int ):
UpperCamelCase__ :str = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
UpperCamelCase__ :Optional[Any] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :str = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , lowerCamelCase__ )
UpperCamelCase__ :Dict = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
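# A usage sketch (downloads weights when run) for pairing the encoder and
# decoder exercised above into a single seq2seq model. EncoderDecoderModel is
# the documented way to combine BertGeneration checkpoints; the checkpoint
# name follows the slow tests above.
from transformers import EncoderDecoderModel
seq2seq = EncoderDecoderModel.from_encoder_decoder_pretrained(
    """google/bert_for_seq_generation_L-24_bbc_encoder""" , """google/bert_for_seq_generation_L-24_bbc_encoder""" )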
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
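# A self-contained sketch of the incremental-decoding contract that the
# past-key-values test above verifies: with the cache fed back in, the final
# hidden states must match a full recomputation. The tiny config values are
# arbitrary assumptions chosen only to keep the model small.
if is_torch_available():
    _config = GPTNeoXJapaneseConfig(
        vocab_size=64 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_multiple_size=4 )
    _model = GPTNeoXJapaneseForCausalLM(_config ).eval()
    _ids = torch.randint(0 , 64 , (1, 5) )
    _next_ids = torch.randint(0 , 64 , (1, 2) )
    _cache = _model(_ids , use_cache=True ).past_key_values
    _full = _model(torch.cat([_ids, _next_ids] , dim=-1 ) , output_hidden_states=True ).hidden_states[-1]
    _step = _model(_next_ids , past_key_values=_cache , output_hidden_states=True ).hidden_states[-1]
    assert torch.allclose(_full[:, -2:] , _step , atol=1e-3 )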
| 45 | 1 |
def twos_complement(number : int ) -> str:
    if number > 0:
        raise ValueError("""input must be a negative integer""" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            """1"""
            + """0""" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else """0"""
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
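# Worked example: for -5, bin(-5)[3:] == "101" so binary_number_length is 3;
# abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", which is left-padded and
# prefixed with the sign bit to give the 4-bit two's complement 0b1011.
assert twos_complement(-5 ) == "0b1011"
assert twos_complement(0 ) == "0b0"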
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost(features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main( ) -> None:
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
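# A follow-up sketch: because XGBClassifier implements the scikit-learn
# estimator API, holdout accuracy can also be read off directly with .score
# (the exact value varies with the random train/test split).
if __name__ == "__main__":
    features , targets = data_handling(load_iris() )
    x_train , x_test , y_train , y_test = train_test_split(features , targets , test_size=0.25 )
    print(f"""holdout accuracy: {xgboost(x_train , y_train ).score(x_test , y_test )}""" )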
| 45 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = KandinskyInpaintPipeline
_snake_case : int = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_snake_case : Any = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_snake_case : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_snake_case : Tuple = False
@property
def __a ( self :Optional[int] ):
return 32
@property
def __a ( self :Any ):
return 32
@property
def __a ( self :str ):
return self.time_input_dim
@property
def __a ( self :List[Any] ):
return self.time_input_dim * 4
@property
def __a ( self :Dict ):
return 1_00
@property
def __a ( self :List[str] ):
UpperCamelCase__ :Any = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __a ( self :Dict ):
torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCamelCase__ :Optional[Any] = MultilingualCLIP(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = text_encoder.eval()
return text_encoder
@property
def __a ( self :Dict ):
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCamelCase__ :Optional[Any] = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __a ( self :List[Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self :str ):
torch.manual_seed(0 )
UpperCamelCase__ :Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ :str = self.dummy_tokenizer
UpperCamelCase__ :List[str] = self.dummy_unet
UpperCamelCase__ :Any = self.dummy_movq
UpperCamelCase__ :Optional[int] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCamelCase__ , )
UpperCamelCase__ :Optional[int] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int=0 ):
UpperCamelCase__ :Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCamelCase__ :int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase__ )
# create init_image
UpperCamelCase__ :Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCamelCase__ :str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ :Any = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
        UpperCamelCase__ :Any = np.ones((64, 64) , dtype=np.float32 )
UpperCamelCase__ :Optional[int] = 0
if str(lowerCamelCase__ ).startswith("""mps""" ):
UpperCamelCase__ :List[str] = torch.manual_seed(lowerCamelCase__ )
else:
UpperCamelCase__ :str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __a ( self :str ):
UpperCamelCase__ :Dict = """cpu"""
UpperCamelCase__ :Any = self.get_dummy_components()
UpperCamelCase__ :List[Any] = self.pipeline_class(**lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCamelCase__ :List[str] = output.images
UpperCamelCase__ :str = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
UpperCamelCase__ :str = image[0, -3:, -3:, -1]
UpperCamelCase__ :Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ :Optional[int] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __a ( self :Optional[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
UpperCamelCase__ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCamelCase__ :List[Any] = np.ones((7_68, 7_68) , dtype=np.floataa )
UpperCamelCase__ :Dict = 0
UpperCamelCase__ :Optional[int] = """a hat"""
UpperCamelCase__ :Dict = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
UpperCamelCase__ :Optional[Any] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ :str = pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCamelCase__ :int = pipeline(
lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
UpperCamelCase__ :List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 45 |
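For reference, a minimal, standalone sketch (assuming only numpy and Pillow) of how the dummy inpainting inputs above are assembled: a random RGB init image plus a float mask with a zeroed region. The 32x32 corner is an illustrative assumption, and whether 1.0 means keep or repaint depends on the pipeline's convention.

import numpy as np
from PIL import Image

rng = np.random.default_rng(0)

# Random float image in [0, 1), shape (H, W, C), converted to an 8-bit PIL
# image the same way the test does before resizing to the working resolution.
arr = rng.random((64, 64, 3))
init_image = Image.fromarray(np.uint8(arr * 255)).convert("RGB").resize((256, 256))

# Float mask over the 64x64 grid; the zeroed corner is a hypothetical choice
# for illustration, not read from the obfuscated test above.
mask = np.ones((64, 64), dtype=np.float32)
mask[:32, :32] = 0.0

print(init_image.size, float(mask.mean()))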
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 1 |
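A hedged, self-contained sketch of the pattern this script uses: tokenize a dataset in parallel with datasets.map and record a characters-per-token ratio per example. The dataset name and text column ("wikitext", "text") are placeholders, not the repo the script above targets.

import multiprocessing

from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def tokenize(example):
    # Truncation keeps very long documents from producing huge token lists.
    input_ids = tokenizer(example["text"], truncation=True)["input_ids"]
    ratio = len(example["text"]) / max(len(input_ids), 1)
    return {"input_ids": input_ids, "ratio_char_token": ratio}

ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
ds = ds.map(tokenize, num_proc=multiprocessing.cpu_count())
print(ds[0]["ratio_char_token"])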
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = [10, 20, 30, 40, 50, 60]
UpperCamelCase__ :str = [2, 4, 6, 8, 10, 12]
UpperCamelCase__ :Dict = 1_00
self.assertEqual(kp.calc_profit(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , 2_10 )
def __a ( self :Dict ):
self.assertRaisesRegex(lowerCamelCase__ , """max_weight must greater than zero.""" )
def __a ( self :Union[str, Any] ):
self.assertRaisesRegex(lowerCamelCase__ , """Weight can not be negative.""" )
def __a ( self :Union[str, Any] ):
self.assertRaisesRegex(lowerCamelCase__ , """Profit can not be negative.""" )
def __a ( self :Union[str, Any] ):
self.assertRaisesRegex(lowerCamelCase__ , """max_weight must greater than zero.""" )
def __a ( self :Union[str, Any] ):
self.assertRaisesRegex(
lowerCamelCase__ , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 45 |
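The tests above exercise a greedy_knapsack.calc_profit helper. Below is a sketch of the greedy fractional-knapsack strategy it presumably implements (an assumption about the helper, not a copy of it): take items in decreasing profit-to-weight ratio and split the last item that does not fit. The error messages are kept verbatim, grammar included, because the assertions above match them as regexes.

def calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    # Visit items by profit density, best first.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        if weight[i] <= capacity:
            capacity -= weight[i]
            total += profit[i]
        else:
            # Take the fraction of the item that still fits, then stop.
            total += profit[i] * capacity / weight[i]
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210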
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :Optional[int] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 1 |
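Because the renaming pass collapsed all three parameters of the helper above into the same identifier, here is a de-obfuscated sketch of the same stooge sort for reference:

def stooge_sort(arr, i=0, h=None):
    if h is None:
        h = len(arr) - 1
    if i >= h:
        return arr
    # Put the smaller of the two endpoints first.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # With three or more elements: sort the first 2/3, the last 2/3,
    # then the first 2/3 again.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge_sort(arr, i, h - t)
        stooge_sort(arr, i + t, h)
        stooge_sort(arr, i, h - t)
    return arr

assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]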
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCamelCase = logging.getLogger(__name__)
def A ( lowercase__ : Tuple , lowercase__ : str ) -> Optional[int]:
# save results
if os.path.exists(lowercase__ ):
if os.path.exists(os.path.join(lowercase__ , """config.json""" ) ) and os.path.isfile(
os.path.join(lowercase__ , """config.json""" ) ):
os.remove(os.path.join(lowercase__ , """config.json""" ) )
if os.path.exists(os.path.join(lowercase__ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(lowercase__ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(lowercase__ , """pytorch_model.bin""" ) )
else:
os.makedirs(lowercase__ )
model.save_pretrained(lowercase__ )
def A ( lowercase__ : Union[str, Any] , lowercase__ : Tuple=False ) -> List[Any]:
UpperCamelCase__ :int = 2
if unlogit:
UpperCamelCase__ :int = torch.pow(lowercase__ , lowercase__ )
UpperCamelCase__ :List[Any] = p * torch.log(lowercase__ )
UpperCamelCase__ :Tuple = 0
return -plogp.sum(dim=-1 )
def A ( lowercase__ : Optional[int] ) -> int:
logger.info("""lv, h >\t""" + """\t""".join(f"""{x + 1}""" for x in range(len(lowercase__ ) ) ) )
for row in range(len(lowercase__ ) ):
if tensor.dtype != torch.long:
logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def A ( lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Union[str, Any]=True , lowercase__ : Dict=True , lowercase__ : Optional[Any]=None , lowercase__ : Any=False ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ :Dict = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCamelCase__ :Optional[Any] = torch.zeros(lowercase__ , lowercase__ ).to(args.device )
UpperCamelCase__ :List[Any] = torch.zeros(lowercase__ , lowercase__ ).to(args.device )
if head_mask is None:
UpperCamelCase__ :Tuple = torch.ones(lowercase__ , lowercase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowercase__ )
# If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
UpperCamelCase__ :int = None
UpperCamelCase__ :str = 0.0
UpperCamelCase__ :Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(lowercase__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
UpperCamelCase__ :Optional[Any] = tuple(t.to(args.device ) for t in inputs )
((UpperCamelCase__) , ) :Optional[int] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCamelCase__ :List[Any] = model(lowercase__ , labels=lowercase__ , head_mask=lowercase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowercase__ ):
UpperCamelCase__ :Any = entropy(attn.detach() , lowercase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowercase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :int = torch.pow(torch.pow(lowercase__ , lowercase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
UpperCamelCase__ :Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(lowercase__ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(lowercase__ )
logger.info("""Head ranked by importance scores""" )
UpperCamelCase__ :str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCamelCase__ :List[Any] = torch.arange(
head_importance.numel() , device=args.device )
UpperCamelCase__ :List[Any] = head_ranks.view_as(lowercase__ )
print_ad_tensor(lowercase__ )
return attn_entropy, head_importance, total_loss
def A ( lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Dict ) -> Union[str, Any]:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = compute_heads_importance(lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ )
UpperCamelCase__ :Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , lowercase__ , original_score * args.masking_threshold )
UpperCamelCase__ :Tuple = torch.ones_like(lowercase__ )
UpperCamelCase__ :List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCamelCase__ :Any = original_score
while current_score >= original_score * args.masking_threshold:
UpperCamelCase__ :Optional[int] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only the heads that are not yet masked
UpperCamelCase__ :Optional[int] = float("""Inf""" )
UpperCamelCase__ :Union[str, Any] = head_importance.view(-1 ).sort()[1]
if len(lowercase__ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
UpperCamelCase__ :int = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
UpperCamelCase__ :str = new_head_mask.view(-1 )
UpperCamelCase__ :Union[str, Any] = 0.0
UpperCamelCase__ :Union[str, Any] = new_head_mask.view_as(lowercase__ )
UpperCamelCase__ :Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(lowercase__ )
# Compute metric and head importance again
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = compute_heads_importance(
lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , head_mask=lowercase__ )
UpperCamelCase__ :Tuple = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , lowercase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(lowercase__ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def A ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Optional[int] ) -> Union[str, Any]:
UpperCamelCase__ :Optional[Any] = datetime.now()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = compute_heads_importance(
lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , compute_importance=lowercase__ , head_mask=lowercase__ )
UpperCamelCase__ :Dict = 1 / loss
UpperCamelCase__ :Optional[Any] = datetime.now() - before_time
UpperCamelCase__ :str = sum(p.numel() for p in model.parameters() )
UpperCamelCase__ :Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowercase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowercase__ , lowercase__ ):
UpperCamelCase__ :Optional[int] = [
v,
]
assert sum(len(lowercase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowercase__ )
UpperCamelCase__ :int = sum(p.numel() for p in model.parameters() )
UpperCamelCase__ :Dict = datetime.now()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = compute_heads_importance(
lowercase__ , lowercase__ , lowercase__ , compute_entropy=lowercase__ , compute_importance=lowercase__ , head_mask=lowercase__ , actually_pruned=lowercase__ , )
UpperCamelCase__ :List[str] = 1 / loss
UpperCamelCase__ :List[str] = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , lowercase__ , lowercase__ , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , lowercase__ , lowercase__ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(lowercase__ , args.output_dir )
def A ( ) -> Any:
UpperCamelCase__ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=lowercase__ , type=lowercase__ , required=lowercase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=lowercase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=lowercase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=lowercase__ , type=lowercase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=lowercase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=lowercase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=lowercase__ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=lowercase__ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=lowercase__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=lowercase__ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=lowercase__ , default=42 )
parser.add_argument("""--local_rank""" , type=lowercase__ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=lowercase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=lowercase__ , default="""""" , help="""Can be used for distant debugging.""" )
UpperCamelCase__ :List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowercase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCamelCase__ :Dict = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
UpperCamelCase__ :Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCamelCase__ :int = torch.device("""cuda""" , args.local_rank )
UpperCamelCase__ :List[str] = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCamelCase__ :Union[str, Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCamelCase__ :Any = nn.parallel.DistributedDataParallel(
lowercase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowercase__ )
elif args.n_gpu > 1:
UpperCamelCase__ :Tuple = nn.DataParallel(lowercase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowercase__ )
torch.save(lowercase__ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Prepare dataset
UpperCamelCase__ :Optional[int] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCamelCase__ :int = (torch.from_numpy(lowercase__ ),)
UpperCamelCase__ :List[Any] = TensorDataset(*lowercase__ )
UpperCamelCase__ :Tuple = RandomSampler(lowercase__ )
UpperCamelCase__ :Optional[int] = DataLoader(lowercase__ , sampler=lowercase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowercase__ , lowercase__ , lowercase__ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCamelCase__ :Optional[int] = mask_heads(lowercase__ , lowercase__ , lowercase__ )
prune_heads(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 45 |
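The entropy helper near the top of this script computes -sum(p * log p) along the last axis, optionally squaring p first (its "unlogit" mode for unnormalized scores). A minimal torch sketch of that computation on a toy attention tensor:

import torch

def attention_entropy(p, unlogit=False):
    if unlogit:
        p = torch.pow(p, 2)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log 0 = 0 instead of NaN
    return -plogp.sum(dim=-1)

attn = torch.softmax(torch.randn(2, 4, 8, 8), dim=-1)  # (batch, heads, query, key)
print(attention_entropy(attn).shape)  # per-query entropy: torch.Size([2, 4, 8])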
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to a sorted list
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + """\n""".join(incompatible_ops ) )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 1 |
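Stripped of the protobuf plumbing, the check above is a set difference: ops used by the graph, minus ops the chosen ONNX opset supports, minus the internal whitelist. A toy sketch with made-up op names:

model_op_names = {"MatMul", "Relu", "ReadVariableOp", "SomeExoticOp"}
onnx_ops = {"MatMul", "Relu"}            # stand-in for the opset's op table
internal_ops = {"ReadVariableOp"}        # ops that are safe to ignore

incompatible_ops = sorted(model_op_names - onnx_ops - internal_ops)
if incompatible_ops:
    print("Found the following incompatible ops:")
    print(*incompatible_ops, sep="\n")
else:
    print("The model can properly be converted with ONNX.")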
import requests
def A ( lowercase__ : str , lowercase__ : str ) -> None:
UpperCamelCase__ :Dict = {"""Content-Type""": """application/json"""}
UpperCamelCase__ :Optional[Any] = requests.post(lowercase__ , json={"""text""": message_body} , headers=lowercase__ )
if response.status_code != 200:
UpperCamelCase__ :Union[str, Any] = (
"""Request to slack returned an error """
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the English language (how often each letter appears)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 45 | 1 |
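The winning shift above is the one minimizing a chi-squared-style statistic, sum((observed - expected)**2 / expected) over letter counts. A compact sketch of the scoring step exactly as this file computes it; note that its "expected" count scales each letter's own occurrences by the English frequency, rather than by the message length:

FREQUENCIES = {"e": 0.11162, "t": 0.09356, "a": 0.08497}  # subset for brevity

def score(decryption: str) -> float:
    total = 0.0
    for letter, rel_freq in FREQUENCIES.items():
        occurrences = decryption.count(letter)
        expected = rel_freq * occurrences  # same formula as the function above
        if expected:
            total += (occurrences - expected) ** 2 / expected
    return total

# "d yhub hqjolvk vhqwhqfh" is the first string Caesar-shifted by 3.
print(score("a very english sentence"), score("d yhub hqjolvk vhqwhqfh"))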
import torch
def A ( ) -> Optional[int]:
if torch.cuda.is_available():
UpperCamelCase__ :int = torch.cuda.device_count()
else:
UpperCamelCase__ :List[str] = 0
print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A ( lowercase__ : int ) -> str:
UpperCamelCase__ :str = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def A ( lowercase__ : str ) -> int:
UpperCamelCase__ :int = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
UpperCamelCase__ :List[str] = s_dict.pop(lowercase__ )
elif "subsample" in key:
UpperCamelCase__ :Any = s_dict.pop(lowercase__ )
def A ( lowercase__ : Optional[int] ) -> str:
UpperCamelCase__ , UpperCamelCase__ :int = emb.weight.shape
UpperCamelCase__ :Optional[Any] = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
UpperCamelCase__ :Optional[Any] = emb.weight.data
return lin_layer
def A ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Union[str, Any]:
UpperCamelCase__ :Any = torch.load(lowercase__ , map_location="""cpu""" )
UpperCamelCase__ :int = mam_aaa["""args"""]
UpperCamelCase__ :Union[str, Any] = mam_aaa["""model"""]
UpperCamelCase__ :Optional[int] = state_dict["""decoder.output_projection.weight"""]
remove_ignore_keys_(lowercase__ )
rename_keys(lowercase__ )
UpperCamelCase__ :int = state_dict["""decoder.embed_tokens.weight"""].shape[0]
UpperCamelCase__ :List[str] = args.share_decoder_input_output_embed
UpperCamelCase__ :Dict = [int(lowercase__ ) for i in args.conv_kernel_sizes.split(""",""" )]
UpperCamelCase__ :str = SpeechaTextConfig(
vocab_size=lowercase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(lowercase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=lowercase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=lowercase__ , num_beams=5 , max_length=200 , use_cache=lowercase__ , decoder_start_token_id=2 , early_stopping=lowercase__ , )
UpperCamelCase__ :List[Any] = SpeechaTextForConditionalGeneration(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :int = model.model.load_state_dict(lowercase__ , strict=lowercase__ )
if len(lowercase__ ) > 0 and not set(lowercase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
UpperCamelCase__ :Optional[int] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase__ :Dict = lm_head_weights
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCamelCase = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 45 |
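The conversion above ties the LM head to the decoder embedding by wrapping the embedding matrix in a bias-free Linear layer. A minimal torch sketch of that idea, sized so the Linear weight shape matches the embedding weight exactly:

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the embedding parameters
    return lin_layer

emb = nn.Embedding(1000, 64)
lm_head = make_linear_from_emb(emb)
print(lm_head(torch.randn(2, 64)).shape)  # logits over the 1000-entry vocab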
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
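The registration tests above rely on the Auto-class plugin pattern: pair a config class with a feature extractor class so AutoFeatureExtractor can resolve it by model_type. A hedged sketch of that flow; the class names here are made up and the extractor body is left empty:

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # the key AutoConfig resolves on

class MyFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# Registering a model_type that already exists in the library raises a
# ValueError, which is what the "register something existing" assertion checks.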
from __future__ import annotations
from math import gcd
def A ( lowercase__ : int , lowercase__ : int = 2 , lowercase__ : int = 1 , lowercase__ : int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
return (pow(lowercase__ , 2 ) + step) % modulus
for _ in range(lowercase__ ):
# These track the position within the cycle detection logic.
UpperCamelCase__ :Dict = seed
UpperCamelCase__ :List[Any] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
UpperCamelCase__ :Optional[int] = rand_fn(lowercase__ , lowercase__ , lowercase__ )
UpperCamelCase__ :List[str] = rand_fn(lowercase__ , lowercase__ , lowercase__ )
UpperCamelCase__ :str = rand_fn(lowercase__ , lowercase__ , lowercase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
UpperCamelCase__ :str = gcd(hare - tortoise , lowercase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's "optimized" variant does.
UpperCamelCase__ :Union[str, Any] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
UpperCamelCase = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
| 45 |
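A compact restatement of the Floyd cycle-detection core above, handy for checking the algorithm by hand; 8051 = 83 * 97 is the classic worked example for f(x) = x**2 + 1:

from math import gcd

def pollard_rho_once(num, seed=2, step=1):
    f = lambda x: (x * x + step) % num
    tortoise = hare = seed
    while True:
        tortoise = f(tortoise)       # the tortoise moves one step
        hare = f(f(hare))            # the hare moves two steps
        divisor = gcd(hare - tortoise, num)
        if divisor == 1:
            continue                 # no factor found yet, keep walking
        return None if divisor == num else divisor

print(pollard_rho_once(8051))  # expect 97 (or 83), since 8051 = 83 * 97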
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
| 45 | 1 |
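The guidance loop above nudges each denoising step along the gradient of a learned value function, scaled by the scheduler's posterior standard deviation. A minimal sketch of that single update using a stand-in value model (the toy value function and shapes are assumptions for illustration):

import torch

def guide(x, value_fn, scale, posterior_variance):
    x = x.detach().requires_grad_(True)
    y = value_fn(x)                                    # one value per trajectory
    grad = torch.autograd.grad(y.sum(), x)[0]
    grad = grad * torch.exp(0.5 * posterior_variance)  # scale by the model std
    return (x + scale * grad).detach()

value_fn = lambda x: -(x ** 2).sum(dim=(1, 2))  # toy value: prefer small states
x = torch.randn(4, 32, 14)                      # (batch, horizon, state + action)
x = guide(x, value_fn, scale=0.1, posterior_variance=torch.tensor(0.04))
print(x.shape)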