from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NB: the tester pins these values rather than using its arguments
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50_000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb_weights = emb(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb_weights, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb([2, 16, 512])
        weights = emb.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
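For orientation, the rotary test above exercises RoFormer's trick of rotating each (even, odd) pair of query/key features by a position-dependent angle drawn from the sinusoidal table, so that dot-product attention becomes a function of relative position. A minimal NumPy sketch of that idea, assuming the [sin | cos] layout that TFRoFormerSinusoidalPositionalEmbedding produces (illustrative only, not the library's exact code):

import numpy as np

def apply_rotary_sketch(x, sinusoidal_pos):
    # x: (seq_len, dim); sinusoidal_pos: (seq_len, dim) laid out as [sin | cos] halves.
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.repeat(sin, 2, axis=-1)  # interleave so each (even, odd) pair shares an angle
    cos_pos = np.repeat(cos, 2, axis=-1)
    # rotate_half: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    rotated = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotated * sin_pos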
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
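The file above is transformers' standard lazy-import machinery: at runtime the module object is swapped for a _LazyModule that resolves attributes from _import_structure on first access, so the torch/TF/flax backends are only imported when something from them is actually requested. A stripped-down sketch of the mechanism (a simplification for illustration, not the library's exact class):

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    """Resolve attributes from a name -> submodule map on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: submodule for submodule, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        module = importlib.import_module(f"{self.__name__}.{self._class_to_module[item]}")
        value = getattr(module, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value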
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the currently displayed price for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
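Note that the scraper is tightly coupled to Yahoo's generated CSS class string, which changes without notice, and soup.find returns None on a miss. A more defensive variant under the same assumptions about the page layout (illustrative only):

def stock_price_safe(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div is not None else None
    return span.text if span is not None else None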
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
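A hedged usage sketch (the dataset features are made up): aligning the template with concrete features swaps the placeholder ClassLabel in label_schema for the dataset's own label feature.

from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification(image_column="image", label_column="labels")
aligned = template.align_with_features(features)
# aligned.label_schema["labels"] is now the concrete two-class ClassLabel above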
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."
        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
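As a concrete worked example of the convention test_shift_tokens_right pins down (values made up): every token moves one slot to the right, position 0 receives the decoder start token, and any -100 label padding is replaced with the pad token. A minimal NumPy sketch:

import numpy as np

input_ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
shifted = np.zeros_like(input_ids)
shifted[:, 1:] = input_ids[:, :-1]
shifted[:, 0] = 2  # decoder_start_token_id
shifted = np.where(shifted == -100, 1, shifted)  # pad_token_id = 1
print(shifted)  # [[ 2 71 82 18 33  2  1]] -- one fewer pad token than the input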
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A configuration replicating BertConfig, extended with pruning/masking hyperparameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
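A brief instantiation sketch (values illustrative) showing the pruning-specific knobs this config adds on top of the usual BERT hyperparameters:

config = MaskedBertConfig(
    vocab_size=30_522,
    pruning_method="topK",  # keep only the top-scoring weights per layer
    mask_init="constant",   # how the learned score matrices start out
    mask_scale=0.0,         # initial value for those scores
)
print(config.model_type)  # masked_bert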
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
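The WordPiece cases above all follow greedy longest-match-first decoding: at each position the tokenizer grabs the longest vocabulary entry (prefixed with ## when not word-initial) and falls back to [UNK] for the whole word if nothing matches. A compact sketch of that loop (simplified; the real implementation also caps the per-word input length):

def wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:  # shrink the candidate until a vocab entry matches
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

# wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]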
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a list, with wrap-around indexing."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # returns False (not None) on an empty queue, mirroring the original API
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
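A quick usage example for the queue above, showing the wrap-around that gives it its name:

if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)
    print(len(queue), queue.first())  # 3 1
    print(queue.dequeue())            # 1
    queue.enqueue(4)                  # rear wraps around into the freed slot
    print(queue.dequeue(), queue.dequeue(), queue.dequeue())  # 2 3 4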
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
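For orientation, the full_loop tests above walk the standard DDPM reverse process. A skeletal sketch of that loop (the unet here is a placeholder assumed to return the raw residual tensor, and scheduler.set_timesteps is assumed to have been called; illustrative only):

import torch

@torch.no_grad()
def denoise_sketch(unet, scheduler, shape, generator=None):
    # Start from pure Gaussian noise and repeatedly remove the noise the
    # model predicts, stepping the scheduler backward through its timesteps.
    sample = torch.randn(shape, generator=generator)
    for t in scheduler.timesteps:
        residual = unet(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample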
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
    lowerCamelCase__: Dict ={int(k): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
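# Shape sketch (hypothetical hidden_size = 4): the fused timm matrix
# `blocks.{i}.attn.qkv.weight` has shape (3 * 4, 4); rows [0:4] become the query
# projection, rows [4:8] the key projection and rows [8:12] the value
# projection, mirroring the three slices taken above.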
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=True ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
    Path(__a ).mkdir(exist_ok=True )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
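# Example invocation (script name and paths are placeholders):
# python convert_yolos_to_pytorch.py \
#     --yolos_name yolos_s_200_pre \
#     --checkpoint_path /path/to/yolos_s_200_pre.pth \
#     --pytorch_dump_folder_path ./yolos-small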
| 10 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "prophetnet.tokenizer"}
__A = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
lowerCamelCase__: int =reader.readlines()
for index, token in enumerate(__a ):
lowerCamelCase__: List[str] =token.rstrip("\n" )
lowerCamelCase__: List[Any] =index
return vocab
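# The vocab file read above is plain text with one token per line; the line
# index becomes the token id. A hypothetical three-line file containing
#   [PAD]
#   [CLS]
#   [SEP]
# would yield {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2}.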
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
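        # Offset illustration (ids per the alignment comment above): with a
        # fairseq offset of 12, the spm piece at id 3 (",") is exposed as vocab
        # id 3 + 12 = 15.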
def __getstate__(self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.__dict__.copy()
lowerCamelCase__: Dict =None
return state
def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_)) + [1]
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
        lowerCamelCase__: str ={self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: List[str] =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: Dict =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase__: Union[str, Any] =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
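# Minimal usage sketch (an assumption: a local SentencePiece model file named
# "prophetnet.tokenizer" exists in the working directory):
# tokenizer = _SCREAMING_SNAKE_CASE("prophetnet.tokenizer")
# ids = tokenizer("Hello world")["input_ids"]  # ends with the [SEP] id appended above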
| 10 |
from math import ceil, sqrt
def lowerCAmelCase_ ( __a = 1000000 ) -> int:
    """Count the hollow square laminae that use up to `limit` tiles (Project Euler 173).
    A lamina with outer width ``o`` and hole width ``h`` uses ``o**2 - h**2`` tiles;
    ``h`` must share the parity of ``o`` and satisfy ``1 <= h <= o - 2``.
    """
    lowerCamelCase__: Optional[int] =0
    for outer_width in range(3 , (limit // 4) + 2 ):
        # smallest hole width that keeps the tile count within the limit
        if outer_width**2 > limit:
            lowerCamelCase__: Dict =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            lowerCamelCase__: str =1
        # the hole must share the parity of the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # hole widths hole_width_lower_bound, hole_width_lower_bound + 2, ..., outer_width - 2 all fit
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
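# A hedged cross-check sketch for small limits (hypothetical helper, not part
# of the original file): enumerate every (outer, hole) pair directly instead of
# counting in closed form.
def _brute_force_solution(limit: int = 1000) -> int:
    count = 0
    for outer_width in range(3, (limit // 4) + 2):
        # start from the widest hole; the tile count only grows as the hole
        # shrinks, so the inner loop can stop early
        for hole_width in range(outer_width - 2, 0, -2):
            if outer_width**2 - hole_width**2 > limit:
                break
            count += 1
    return count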
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "gpt_neox_japanese"
def __init__(self : Dict , UpperCAmelCase_ : List[Any]=32_000 , UpperCAmelCase_ : str=2_560 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Any=1.00 , UpperCAmelCase_ : List[Any]=10_000 , UpperCAmelCase_ : int=2_048 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : str=1E-5 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Any=31_996 , UpperCAmelCase_ : Optional[int]=31_999 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.0 , **UpperCAmelCase_ : Optional[int] , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: int =vocab_size
lowerCamelCase__: Optional[int] =max_position_embeddings
lowerCamelCase__: Tuple =hidden_size
lowerCamelCase__: List[str] =num_hidden_layers
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: str =intermediate_multiple_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =rotary_pct
lowerCamelCase__: Union[str, Any] =rotary_emb_base
lowerCamelCase__: Any =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: Any =use_cache
lowerCamelCase__: Any =attention_dropout
lowerCamelCase__: Union[str, Any] =hidden_dropout
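# Minimal construction sketch (hypothetical overrides), relying on the defaults
# defined above:
# config = _SCREAMING_SNAKE_CASE(vocab_size=32_000, num_hidden_layers=16)
# print(config.intermediate_multiple_size)  # 4 by default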
| 10 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
    lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=True ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
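# A minimal round-trip sketch (an added assumption, not part of the original
# suite): write a tiny in-memory dataset with ParquetDatasetWriter and read it
# back with ParquetDatasetReader, reusing only APIs already imported above.
def test_parquet_write_then_read_roundtrip(tmp_path):
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(dataset, tmp_path / "roundtrip.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "roundtrip.parquet"), cache_dir=str(tmp_path / "cache")).read()
    assert reloaded.num_rows == dataset.num_rows
    assert reloaded.column_names == dataset.column_names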
| 10 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Dict="divided_space_time" , UpperCAmelCase_ : Optional[int]=None , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Any =image_size
lowerCamelCase__: int =num_channels
lowerCamelCase__: Optional[Any] =patch_size
lowerCamelCase__: Optional[int] =num_frames
lowerCamelCase__: int =is_training
lowerCamelCase__: List[str] =use_labels
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: str =num_hidden_layers
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: List[str] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[str] =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Optional[Any] =attention_type
lowerCamelCase__: Tuple =initializer_range
lowerCamelCase__: Optional[int] =scope
lowerCamelCase__: Union[str, Any] =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowerCamelCase__: Union[str, Any] =(image_size // patch_size) ** 2
lowerCamelCase__: Dict =(num_frames) * self.num_patches_per_frame + 1
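        # With the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, so seq_length = 2 * 25 + 1 = 51.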
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
lowerCamelCase__: int =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Tuple =None
if self.use_labels:
lowerCamelCase__: List[Any] =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: Any =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCamelCase__: List[Any] =self.num_labels
return config
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: Any =TimesformerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =TimesformerForVideoClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Optional[int] =model(UpperCAmelCase_)
# verify the logits shape
lowerCamelCase__: int =torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =config_and_inputs
lowerCamelCase__: Any ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase_ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: str =TimesformerModelTester(self)
lowerCamelCase__: List[str] =ConfigTester(
self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Any=False) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =copy.deepcopy(UpperCAmelCase_)
if return_labels:
if model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: str =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Any =model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCamelCase__: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Any =[*signature.parameters.keys()]
lowerCamelCase__: Any =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Union[str, Any] =TimesformerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
if not self.has_attentions:
pass
else:
lowerCamelCase__ , lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: Tuple =True
for model_class in self.all_model_classes:
lowerCamelCase__: List[str] =self.model_tester.seq_length
lowerCamelCase__: Union[str, Any] =self.model_tester.num_frames
lowerCamelCase__: List[Any] =True
lowerCamelCase__: str =False
lowerCamelCase__: List[Any] =True
lowerCamelCase__: Union[str, Any] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: int =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: str =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__: List[Any] =True
lowerCamelCase__: str =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: str =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Optional[Any] =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCamelCase__: str =len(UpperCAmelCase_)
# Check attention is always last and order is fine
lowerCamelCase__: Dict =True
lowerCamelCase__: Union[str, Any] =True
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[Any] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
self.assertEqual(out_len + 1 , len(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str):
lowerCamelCase__: str =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Tuple =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Union[str, Any] =outputs.hidden_states
lowerCamelCase__: Optional[Any] =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
lowerCamelCase__: List[str] =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: str =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: List[str] =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
lowerCamelCase__: List[str] =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCamelCase__: List[str] =np.load(__a )
return list(__a )
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
UpperCAmelCase_)
lowerCamelCase__: Dict =self.default_image_processor
lowerCamelCase__: List[Any] =prepare_video()
lowerCamelCase__: Optional[Any] =image_processor(video[:8] , return_tensors="pt").to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
lowerCamelCase__: int =model(**UpperCAmelCase_)
# verify the logits
lowerCamelCase__: List[str] =torch.Size((1, 400))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.tensor([-0.3016, -0.7713, -0.4205]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4))
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
import fire
from utils import calculate_rouge, save_json
def lowerCAmelCase_ ( __a , __a , __a=None , **__a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Any =[x.strip() for x in open(__a ).readlines()]
lowerCamelCase__: Dict =[x.strip() for x in open(__a ).readlines()][: len(__a )]
lowerCamelCase__: str =calculate_rouge(__a , __a , **__a )
if save_path is not None:
save_json(__a , __a , indent=__a )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
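# Hypothetical invocation via fire (file names are placeholders):
# python rouge_cli.py predictions.txt references.txt --save_path metrics.json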
| 10 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(UpperCAmelCase_)
def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
if "text_queries" in kwargs:
lowerCamelCase__: Any =kwargs.pop("text_queries")
if isinstance(UpperCAmelCase_ , (str, Image.Image)):
lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels}
else:
lowerCamelCase__: Any =image
lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
return results
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] ={}
if "threshold" in kwargs:
lowerCamelCase__: List[Any] =kwargs["threshold"]
if "top_k" in kwargs:
lowerCamelCase__: Any =kwargs["top_k"]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(inputs["image"])
lowerCamelCase__: Dict =inputs["candidate_labels"]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Any =candidate_labels.split(",")
        lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.int64)
for i, candidate_label in enumerate(UpperCAmelCase_):
lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework)
lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework)
yield {
"is_last": i == len(UpperCAmelCase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =model_inputs.pop("target_size")
lowerCamelCase__: Dict =model_inputs.pop("candidate_label")
lowerCamelCase__: Dict =model_inputs.pop("is_last")
lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_)
lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =[]
for model_output in model_outputs:
lowerCamelCase__: Optional[Any] =model_output["candidate_label"]
lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_)
lowerCamelCase__: Dict =self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
lowerCamelCase__: Dict =outputs["scores"][index].item()
lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0])
lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box}
results.append(UpperCAmelCase_)
        lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda x: x["score"] , reverse=True)
if top_k:
lowerCamelCase__: Dict =results[:top_k]
return results
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist()
lowerCamelCase__: Optional[int] ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
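# Minimal usage sketch (the checkpoint name is an assumption, though it is a
# real OWL-ViT checkpoint): the `pipeline` factory is the usual entry point
# rather than instantiating this class directly.
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )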
| 10 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = "Hello, World!"
__A = "en_XX"
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =Path("data_bin" )
lowerCamelCase__: List[Any] =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__a ).parent ) , checkpoint_file=Path(__a ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(__a ) , bpe="sentencepiece" , sentencepiece_model=str(Path(__a ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(__a )
lowerCamelCase__: List[str] =xmod.model.encoder.sentence_encoder
lowerCamelCase__: Tuple =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase__: int =xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , __a )
lowerCamelCase__: Optional[int] =XmodForSequenceClassification(__a ) if classification_head else XmodForMaskedLM(__a )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase__: List[str] =xmod_sent_encoder.embed_tokens.weight
lowerCamelCase__: str =xmod_sent_encoder.embed_positions.weight
lowerCamelCase__: str =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase__: str =xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase__: List[Any] =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase__: List[Any] =model.roberta.encoder.layer[i]
lowerCamelCase__: int =xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase__: Any =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
lowerCamelCase__: Optional[Any] =xmod_layer.self_attn.q_proj.weight
lowerCamelCase__: Dict =xmod_layer.self_attn.q_proj.bias
lowerCamelCase__: Any =xmod_layer.self_attn.k_proj.weight
lowerCamelCase__: str =xmod_layer.self_attn.k_proj.bias
lowerCamelCase__: Any =xmod_layer.self_attn.v_proj.weight
lowerCamelCase__: int =xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase__: Optional[int] =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
lowerCamelCase__: Any =xmod_layer.self_attn.out_proj.weight
lowerCamelCase__: Union[str, Any] =xmod_layer.self_attn.out_proj.bias
lowerCamelCase__: List[Any] =xmod_layer.self_attn_layer_norm.weight
lowerCamelCase__: Any =xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase__: Tuple =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
lowerCamelCase__: int =xmod_layer.fca.weight
lowerCamelCase__: Optional[Any] =xmod_layer.fca.bias
# output
lowerCamelCase__: Optional[Any] =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
lowerCamelCase__: Dict =xmod_layer.fca.weight
lowerCamelCase__: Any =xmod_layer.fca.bias
lowerCamelCase__: Union[str, Any] =xmod_layer.final_layer_norm.weight
lowerCamelCase__: List[str] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase__: Any =xmod_layer.adapter_layer_norm.weight
lowerCamelCase__: Optional[Any] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase__: Optional[int] =bert_output.adapter_modules[lang_code]
lowerCamelCase__: str =xmod_layer.adapter_modules[lang_code]
lowerCamelCase__: Optional[int] =from_adapter.fca.weight
lowerCamelCase__: Dict =from_adapter.fca.bias
lowerCamelCase__: Tuple =from_adapter.fca.weight
lowerCamelCase__: List[Any] =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase__: Optional[Any] =xmod_sent_encoder.layer_norm.weight
lowerCamelCase__: Optional[int] =xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase__: Optional[int] =xmod.model.classification_heads["mnli"].dense.weight
lowerCamelCase__: List[str] =xmod.model.classification_heads["mnli"].dense.bias
lowerCamelCase__: List[Any] =xmod.model.classification_heads["mnli"].out_proj.weight
lowerCamelCase__: List[Any] =xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
lowerCamelCase__: int =xmod.model.encoder.lm_head.dense.weight
lowerCamelCase__: Union[str, Any] =xmod.model.encoder.lm_head.dense.bias
lowerCamelCase__: int =xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase__: Any =xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase__: Tuple =xmod.model.encoder.lm_head.weight
lowerCamelCase__: Tuple =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase__: Optional[int] =xmod.encode(__a ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__a )
lowerCamelCase__: str =model(__a )[0]
if classification_head:
lowerCamelCase__: Dict =xmod.model.classification_heads["mnli"](xmod.extract_features(__a ) )
else:
lowerCamelCase__: Any =xmod.model(__a , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase__: Dict =torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase__: Optional[int] =torch.allclose(__a , __a , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
    Path(__a ).mkdir(parents=True , exist_ok=True )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__A = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
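# Example invocation (script name and paths are placeholders):
# python convert_xmod_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path /path/to/xmod_base/model.pt \
#     --pytorch_dump_folder_path ./xmod-base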
| 10 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 10 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
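    # Worked example (illustrative): sigmoid([-1, 0, 1]) is approximately
    # [0.269, 0.5, 0.731], and the 1.702 scale inside the sigmoid makes
    # sigmoid_linear_unit a close elementwise approximation of the GELU activation.
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))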
| 10 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
    num_nodes, num_edges = 9, 14 # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
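    # Sanity sketch (not part of the original test): a spanning tree over the
    # 9 nodes above must contain exactly 8 edges, and the expected tree's
    # weights sum to 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.
    assert len(expected) == 8
    assert sum(cost for _, _, cost in expected) == 37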
| 10 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3]))
self.assertEqual(arr.type , pa.intaa())
def SCREAMING_SNAKE_CASE_ (self : str) ->str:
'''simple docstring'''
        with self.assertRaises(ValueError):
lowerCamelCase__: str =pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa())
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
        with self.assertRaises(ValueError):
lowerCamelCase__: str =pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool") , type=Value("int64")))
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3] , type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
lowerCamelCase__: Tuple =pa.array(TypedSequence(["foo", "bar"] , type=Value("int64")))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64")))
self.assertEqual(arr.type , pa.string())
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
lowerCamelCase__: Tuple =pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64")))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , pa.string())
@require_pil
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
import PIL.Image
        pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects" , side_effect=cast_to_python_objects) as mock_cast_to_python_objects:
lowerCamelCase__: Optional[int] =pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
        self.assertIn("optimize_list_casting" , kwargs)
self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output , expected_num_chunks: int) -> None:
    """simple docstring"""
    stream = pa.BufferReader(output) if isinstance(output , pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
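# _check_output (above) validates both chunking and content: with
# writer_batch_size=1 each written example lands in its own record batch, so the
# table arrives in one chunk per example; otherwise both rows share one chunk.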
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def test_write(fields , writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
    with ArrowWriter(stream=output , features=features) as writer:
        writer.write({"labels": 0} )
        writer.write({"labels": 1} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def test_key_datatype(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def test_duplicate_keys(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1} , key=10 )
            writer.write({"col_1": "bar", "col_2": 2} , key=10 )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def test_write_with_keys(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} , key=1 )
        writer.write({"col_1": "bar", "col_2": 2} , key=2 )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def test_write_batch(fields , writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
        writer.write_batch({"col_1": [], "col_2": []} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def test_write_table(fields , writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def test_write_row(fields , writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.intaa()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.intaa()}
        output = os.path.join(tmp_dir , "test.arrow" )
        with ArrowWriter(path=output , schema=pa.schema(fields) ) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype(arr_type):
    """simple docstring"""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst , value):
    """simple docstring"""
    if isinstance(lst[0] , list):
        change_first_primitive_element_in_list(lst[0] , value)
    else:
        lst[0] = value
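# Minimal illustration (not part of the original suite) of the two helpers above.
def _helpers_usage_sketch() -> None:
    # get_base_dtype unwraps nested pyarrow list types down to the primitive dtype.
    assert get_base_dtype(pa.list_(pa.list_(pa.int32()))) == pa.int32()
    # change_first_primitive_element_in_list mutates the innermost first element.
    nested = [[[1, 2, 3]]]
    change_first_primitive_element_in_list(nested, 99)
    assert nested == [[[99, 2, 3]]]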
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence(sequence , optimized_int_type , expected_dtype):
    """simple docstring"""
    arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence(sequence , col , expected_dtype):
    """simple docstring"""
    arr = pa.array(OptimizedTypedSequence(sequence , col=col) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        max_value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , max_value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def test_arrow_writer_closes_stream(raise_exception , tmp_path):
    """simple docstring"""
    path = str(tmp_path / "dataset-train.arrow" )
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """simple docstring"""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def test_writer_embed_local_files(tmp_path , embed_local_files):
    """simple docstring"""
    import PIL.Image
    image_path = str(tmp_path / "test_image_rgb.jpg" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(image_path , format="png" )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({"image": Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({"image": image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"] , str )
        with open(image_path , "rb" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """simple docstring"""
    non_nullable_schema = pa.schema([pa.field("col_1" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema )
    assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
| 10 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 10 | 1 |
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
"""simple docstring"""
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
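        # Worked example: queens at (0, 0) and (2, 2) collide on a 45º diagonal
        # since 0 - 0 == 2 - 2, while queens at (1, 2) and (3, 0) collide on a
        # 135º diagonal since 1 + 2 == 3 + 0.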
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
"""simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
print(__a )
print("" )
    print(len(boards) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
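    # For n = 4 the search prints exactly two solutions, corresponding to the
    # column placements [1, 3, 0, 2] and [2, 0, 3, 1].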
| 10 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
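# Illustration (not part of the original script) of how MAPPING is applied: a
# fairseq key such as "encoder.layers.3.self_attn.k_proj.weight" matches the
# "self_attn.k_proj" entry and is renamed to
# "unispeech.encoder.layers.3.attention.k_proj.weight", with the "*" wildcard
# standing in for the layer index recovered from the original key.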
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | 1 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any]) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any]) ->Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict) ->int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int) ->Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : Any , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["sentencepiece"]
def __init__(self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int) ->List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"])
| 10 |
from typing import Any
def viterbi( observations_space: list , states_space: list , initial_probabilities: dict , transition_probabilities: dict , emission_probabilities: dict , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
    for o in range(1 , len(observations_space) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
# Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
# The final observation
    final_observation = observations_space[len(observations_space) - 1]
# argmax for given final observation
    arg_max = ""
    max_probability = -1
for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
# Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
result.reverse()
return result
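# Usage sketch (illustrative values; the classic healthy/fever HMM example):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ["Healthy", "Healthy", "Fever"]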
def _validation( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def _validate_lists(observations_space: Any , states_space: Any ) -> None:
    """simple docstring"""
    _validate_list(observations_space , "observations_space" )
    _validate_list(states_space , "states_space" )
def _validate_list(_object: Any , var_name: str ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = F"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def _validate_dicts( initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , "initial_probabilities" , float )
    _validate_nested_dict(transition_probabilities , "transition_probabilities" )
    _validate_nested_dict(emission_probabilities , "emission_probabilities" )
def _validate_nested_dict(_object: Any , var_name: str ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict(_object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = F"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = "nested dictionary " if nested else ""
        msg = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | 1 |
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
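# Worked example (illustrative): by the mass-action law n * p = n_i**2, calling
# carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10)
# treats hole_conc as the unknown and returns ("hole_conc", 10**2 / 25),
# i.e. ("hole_conc", 4.0).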
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
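    # Note (illustrative): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
    # the property above evaluates to 5 * 2**6 = 320, i.e. the feature extractor
    # downsamples the raw 16 kHz waveform by a factor of 320.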
| 10 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __a ) -> bool:
"""simple docstring"""
return len(set(__a ) ) == len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str , a: float | Decimal , precision: float = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func) ) / Decimal(eval(str(diff(func) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func) ) < precision: # noqa: S307
            return float(x )
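# The loop above implements the classic Newton-Raphson update
# x_{n+1} = x_n - f(x_n) / f'(x_n), with sympy.diff supplying f' symbolically
# and Decimal keeping the iterates at high precision.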
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find value of e (the root of log(x) - 1)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
| 10 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str ) -> list[str]:
    """simple docstring"""
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s) )]
def bwt_transform(s: str ) -> BWTTransformDict:
    """simple docstring"""
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str , idx_original_string: int ) -> str:
    """simple docstring"""
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
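# Worked example (illustrative): bwt_transform("banana") yields
# {"bwt_string": "nnbaaa", "idx_original_string": 3}, and
# reverse_bwt("nnbaaa", 3) recovers "banana".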
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f'Burrows Wheeler transform for string \'{s}\' results '
        f'in \'{result["bwt_string"]}\''
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
        f'we get original string \'{original_string}\''
    )
| 10 |
import itertools
import math
def is_prime(number: int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth: int = 10001 ) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
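# Example (illustrative): the sixth prime is 13, so solution(6) == 13; the
# default solution() returns the 10001st prime, per Project Euler problem 7.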
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableDiffusionXLImgaImgPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowercase_ = PipelineTesterMixin.required_optional_params - {"latents"}
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
torch.manual_seed(0)
lowerCamelCase__: Tuple =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase_ , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase__: Tuple =EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0)
lowerCamelCase__: Tuple =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
lowerCamelCase__: Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=32 , )
lowerCamelCase__: Optional[Any] =CLIPTextModel(UpperCAmelCase_)
lowerCamelCase__: List[str] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =CLIPTextModelWithProjection(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase_)
lowerCamelCase__: int ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=0) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: Any =image / 2 + 0.5
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: str =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: List[str] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: Any ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: str ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: List[str] =self.get_dummy_components()
lowerCamelCase__: Union[str, Any] =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_)
lowerCamelCase__: Dict =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Dict =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =sd_pipe(**UpperCAmelCase_).images
lowerCamelCase__: int =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__: List[Any] =np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.get_dummy_components()
lowerCamelCase__: Dict =StableDiffusionXLImgaImgPipeline(**UpperCAmelCase_)
lowerCamelCase__: str =sd_pipe.to(UpperCAmelCase_)
lowerCamelCase__: List[Any] =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
# forward without prompt embeds
lowerCamelCase__: int =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: List[Any] =3 * ["this is a negative prompt"]
lowerCamelCase__: Tuple =negative_prompt
lowerCamelCase__: int =3 * [inputs["prompt"]]
lowerCamelCase__: Tuple =sd_pipe(**UpperCAmelCase_)
lowerCamelCase__: Tuple =output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase__: Union[str, Any] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: Dict =3 * ["this is a negative prompt"]
lowerCamelCase__: Any =3 * [inputs.pop("prompt")]
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =sd_pipe.encode_prompt(UpperCAmelCase_ , negative_prompt=UpperCAmelCase_)
lowerCamelCase__: int =sd_pipe(
**UpperCAmelCase_ , prompt_embeds=UpperCAmelCase_ , negative_prompt_embeds=UpperCAmelCase_ , pooled_prompt_embeds=UpperCAmelCase_ , negative_pooled_prompt_embeds=UpperCAmelCase_ , )
lowerCamelCase__: Optional[int] =output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]="cpu" , UpperCAmelCase_ : Optional[int]=torch.floataa , UpperCAmelCase_ : Any=0) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =np.random.RandomState(UpperCAmelCase_).standard_normal((1, 4, 64, 64))
lowerCamelCase__: Union[str, Any] =torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_)
lowerCamelCase__: Any ={
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: int =self.get_inputs(UpperCAmelCase_)
lowerCamelCase__: str =pipe(**UpperCAmelCase_).images
lowerCamelCase__: Tuple =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__: int =np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
assert np.abs(image_slice - expected_slice).max() < 7E-3
| 10 |
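# A hedged end-user sketch of the img2img pipeline exercised by the tests
# above, written against the names imported in that sample. The checkpoint id,
# prompt, and fp16/cuda settings are illustrative assumptions; the tests run a
# tiny dummy config instead of real weights.
def run_sdxl_img2img_sketch(init_image, prompt="A painting of a squirrel eating a burger"):
    pipe = StableDiffusionXLImgaImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
    ).to("cuda")
    # strength controls how much of the init image survives; 0.75 matches the test inputs
    return pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=5.0).images[0]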
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 10 | 1 |
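# A hedged usage sketch for the image processor under test above: resize so
# the shortest edge is 30, center-crop to 30x30, then normalize. The keyword
# names mirror the attributes the tests check, and the random 224x224 image is
# an illustrative stand-in.
processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
dummy_image = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
pixel_values = processor(dummy_image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 30, 30)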
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"text": Value("string" )} )
lowercase_ = Features({} )
lowercase_ = "text"
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 10 |
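# A hedged sketch of how a task template like the one above is consumed: it
# fixes the input schema to a single string column and exposes the mapping
# from that column to the canonical "text" name. The import path and the
# "content" column name are assumptions for illustration.
from datasets.tasks import LanguageModeling

template = LanguageModeling(text_column="content")
assert template.task == "language-modeling"
assert template.column_mapping == {"content": "text"}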
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars
):
lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type"))
lowerCamelCase__: int =do_lower_case
lowerCamelCase__: int =strip_accents
lowerCamelCase__: List[str] =tokenize_chinese_chars
lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_)
lowerCamelCase__: Any =do_lower_case
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =[self.sep_token_id]
lowerCamelCase__: Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
| 10 | 1 |
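# A hedged usage sketch for the fast tokenizer defined above (published in
# transformers as RetriBertTokenizerFast). Downloading the checkpoint and the
# example sentence pair are assumptions for illustration.
from transformers import RetriBertTokenizerFast

tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
encoded = tok("what is dense retrieval?", "a short passage about retrieval")
# expected layout: [CLS] question [SEP] passage [SEP], with token_type_ids 0s then 1s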
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
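# A minimal sketch (hedged) of the lazy-import pattern the module above relies
# on: importing the package stays cheap, and a heavy submodule is imported
# only on first attribute access. TinyLazyModule is a toy stand-in for
# transformers' _LazyModule, not its actual implementation.
import importlib


class TinyLazyModule:
    def __init__(self, package: str, attr_to_submodule: dict):
        self._package = package
        self._map = attr_to_submodule  # e.g. {"GraphormerModel": ".modeling_graphormer"}

    def __getattr__(self, attr: str):
        # triggered only when the attribute is first requested
        submodule = importlib.import_module(self._map[attr], self._package)
        return getattr(submodule, attr)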
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__: Optional[Any] =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase__: Dict =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase__: Optional[Any] =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__: Any =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__: List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: List[str] =batch_size
lowerCamelCase__: Optional[int] =seq_length
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: str =use_labels
lowerCamelCase__: Optional[Any] =vocab_size
lowerCamelCase__: int =hidden_size
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: int =hidden_act
lowerCamelCase__: Tuple =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Optional[int] =max_position_embeddings
lowerCamelCase__: int =eos_token_id
lowerCamelCase__: Union[str, Any] =pad_token_id
lowerCamelCase__: List[str] =bos_token_id
lowerCamelCase__: int =initializer_range
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowerCamelCase__: int =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: Dict =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , )
lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =20
lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: str =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: List[Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: Union[str, Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: Dict =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =20
lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: List[Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: str =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_)
lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase__: Optional[Any] =input_ids.shape[0]
lowerCamelCase__: List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data()
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_)
lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = True
lowercase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =FlaxBlenderbotModelTester(self)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest("JIT Enabled"):
lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
lowerCamelCase__: int ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("JIT Enabled"):
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id
lowerCamelCase__: str =model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_)
lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
lowerCamelCase__: Any =["Sam"]
lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax")
lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic."
lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 10 | 1 |
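# A worked example (hedged) of the shift_tokens_right helper the tests above
# rely on: each row moves one position to the right, the decoder start id
# fills column 0, and any -100 label markers are replaced with pad_token_id.
# With pad_token_id=1 and decoder_start_token_id=2:
#     [[71, 82, 18, 33, 2, 1, 1]]  ->  [[2, 71, 82, 18, 33, 2, 1]]
example_ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
shifted_ids = shift_tokens_right(example_ids, 1, 2)  # requires flax, as in the sample above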
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signal and performs their circular
    convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """Performs the circular convolution of the two signals using the matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 10 |
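# A cross-check sketch for the convolution sample above: circular convolution
# can equivalently be computed as ifft(fft(a) * fft(b)). This assumes the
# default signals stored by the class; [10, 10, 6, 14] is the expected result
# either way.
a = np.array([2, 1, 2, -1], dtype=float)
b = np.array([1, 2, 3, 4], dtype=float)
fft_result = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
assert np.allclose(fft_result, [10, 10, 6, 14])
assert [round(v, 2) for v in fft_result] == CircularConvolution().circular_convolution()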
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "prophetnet.tokenizer"}
__A = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
lowerCamelCase__: int =reader.readlines()
for index, token in enumerate(__a ):
lowerCamelCase__: List[str] =token.rstrip("\n" )
lowerCamelCase__: List[Any] =index
return vocab
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
def __getstate__(self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.__dict__.copy()
lowerCamelCase__: Dict =None
return state
def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_)) + [1]
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: List[str] =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: Dict =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase__: Union[str, Any] =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 10 | 1 |
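# A hedged sketch of the fairseq/sentencepiece id alignment handled above:
# ids 0-4 are special tokens, 5-14 are [unused0]..[unused9], so a spm piece id
# p maps to p + 12, and spm id 0 (its <unk>) falls back to this vocab's [UNK].
def spm_to_model_id(spm_id: int, fairseq_offset: int = 12, unk_id: int = 3) -> int:
    # mirrors _convert_token_to_id for pieces that are not special tokens
    return spm_id + fairseq_offset if spm_id else unk_id


assert spm_to_model_id(3) == 15  # the first "real" piece lands at embedding position 15
assert spm_to_model_id(0) == 3   # unknown piece maps to [UNK]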
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Counts the square laminae that can be formed using up to `limit` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 10 |
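# A compact sketch (hedged) of the optional-dependency guard repeated through
# the __init__ above: probe the backends once, export the real classes if they
# are available, otherwise export a stand-in that fails loudly only when used.
# StableDiffusionPipelineSketch is an illustrative dummy, not the real fallback.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class StableDiffusionPipelineSketch:
        def __init__(self, *args, **kwargs):
            raise ImportError("This sketch requires torch and transformers.")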
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
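# --- Hedged usage sketch (added): the round trip exercised by the tests above,
# written as a plain script. Assumes a local `datasets` install; the column
# values and the "out.parquet" path are illustrative.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict(
    {"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
)
ParquetDatasetWriter(ds, "out.parquet").write()        # returns bytes written (> 0)
reloaded = ParquetDatasetReader("out.parquet").read()  # Dataset with the same schema
assert reloaded.column_names == ["col_1", "col_2", "col_3"]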
| 10 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase_):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
lowerCamelCase__: List[Any] =copy.deepcopy(self)
lowerCamelCase__: Optional[int] =self.label_schema.copy()
lowerCamelCase__: int =features[self.label_column]
lowerCamelCase__: int =label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
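# --- Hedged sketch (added): the core of the alignment method above, shown on a
# concrete Features object. Label names are illustrative; the method validates
# that the label column is a ClassLabel, then copies it into the label schema.
import copy
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
assert isinstance(features["labels"], ClassLabel)      # the type check performed above
label_schema = copy.deepcopy(Features({"labels": ClassLabel}))
label_schema["labels"] = features["labels"]            # swap in the dataset's real ClassLabel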
| 10 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {"vocab_file": "spiece.model"}
__A = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Dict="<unk>" , UpperCAmelCase_ : Union[str, Any]="<sep>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Tuple="<cls>" , UpperCAmelCase_ : Any="<mask>" , UpperCAmelCase_ : Optional[int]=["<eop>", "<eod>"] , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Optional[Any] , ) ->None:
'''simple docstring'''
lowerCamelCase__: str =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =3
lowerCamelCase__: Dict =do_lower_case
lowerCamelCase__: Any =remove_space
lowerCamelCase__: int =keep_accents
lowerCamelCase__: List[str] =vocab_file
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCAmelCase_)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation.")
lowerCamelCase__: Optional[Any] =jieba
lowerCamelCase__: Union[str, Any] =str.maketrans(" \n" , "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
return len(self.sp_model)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Dict ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.__dict__.copy()
lowerCamelCase__: str =None
return state
def __setstate__(self : str , UpperCAmelCase_ : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__: List[str] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: str ={}
lowerCamelCase__: List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
if self.remove_space:
lowerCamelCase__: Union[str, Any] =" ".join(inputs.strip().split())
else:
lowerCamelCase__: int =inputs
lowerCamelCase__: Any =outputs.replace("``" , "\"").replace("''" , "\"")
if not self.keep_accents:
lowerCamelCase__: Optional[Any] =unicodedata.normalize("NFKD" , UpperCAmelCase_)
lowerCamelCase__: Optional[int] ="".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_)])
if self.do_lower_case:
lowerCamelCase__: Optional[Any] =outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Any =self.preprocess_text(UpperCAmelCase_)
lowerCamelCase__: str =self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[]
for piece in pieces:
if len(UpperCAmelCase_) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
lowerCamelCase__: Dict =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
lowerCamelCase__: Tuple =cur_pieces[1:]
else:
lowerCamelCase__: Any =cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(UpperCAmelCase_)
else:
new_pieces.append(UpperCAmelCase_)
return new_pieces
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple) ->Tuple:
'''simple docstring'''
return self.sp_model.PieceToId(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =[self.sep_token_id]
lowerCamelCase__: Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1, 1]
return ([0] * len(UpperCAmelCase_)) + [1, 1]
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =[self.sep_token_id]
lowerCamelCase__: Any =[2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: List[str] =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: List[Any] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Any) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =super()._decode(*UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Dict =text.replace(" " , "").replace("\u2582" , " ").replace("\u2583" , "\n")
return text
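# --- Sketch (added): the whitespace round trip the tokenizer above relies on.
# "\u2582"/"\u2583" stand in for space and newline while SentencePiece runs, and
# `_decode` maps them back (see the final method above).
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "hello world\n".translate(translator)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "hello world\n"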
| 10 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "masked_bert"
def __init__(self : Dict , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=1E-1_2 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str="topK" , UpperCAmelCase_ : List[str]="constant" , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : int , ) ->List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =vocab_size
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: List[Any] =hidden_act
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: Dict =hidden_dropout_prob
lowerCamelCase__: str =attention_probs_dropout_prob
lowerCamelCase__: int =max_position_embeddings
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: str =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: str =pruning_method
lowerCamelCase__: Union[str, Any] =mask_init
lowerCamelCase__: Optional[Any] =mask_scale
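# --- Hedged usage sketch (added). In the original movement-pruning examples this
# config class is `MaskedBertConfig`; the name and keyword arguments below follow
# those sources and are assumptions here, since this dump obfuscates them.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
assert config.model_type == "masked_bert"
assert config.vocab_size == 30_522  # default taken from the signature above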
| 10 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(self : Optional[Any] , UpperCAmelCase_ : int) ->None:
        '''simple docstring'''
        self.n = UpperCAmelCase_
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
def __len__(self : str) ->int:
'''simple docstring'''
return self.size
    def is_empty (self : int) ->bool:
'''simple docstring'''
return self.size == 0
    def first (self : List[str]) ->str:
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
    def enqueue (self : int , UpperCAmelCase_ : Optional[int]) ->str:
'''simple docstring'''
if self.size >= self.n:
raise Exception("QUEUE IS FULL")
        self.array[self.rear] = UpperCAmelCase_
        self.rear = (self.rear + 1) % self.n
self.size += 1
return self
    def dequeue (self : Tuple) ->Tuple:
'''simple docstring'''
if self.size == 0:
raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
self.size -= 1
return temp
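# --- Usage sketch (added) for the fixed-size circular queue above. The class name
# is obfuscated in this dump, so alias it for readability; method names follow the
# restored definitions above.
CircularQueue = _SCREAMING_SNAKE_CASE
q = CircularQueue(2)
q.enqueue(1).enqueue(2)          # enqueue returns self, so calls chain
assert len(q) == 2 and q.first() == 1
assert q.dequeue() == 1          # front advances modulo n
q.enqueue(3)                     # rear wraps around and reuses the freed slot
assert q.dequeue() == 2 and q.dequeue() == 3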
| 10 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__(self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 50_257 , UpperCAmelCase_ : int = 1_024 , UpperCAmelCase_ : int = 768 , UpperCAmelCase_ : int = 12 , UpperCAmelCase_ : int = 12 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "gelu_new" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 1E-5 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , ) ->str:
'''simple docstring'''
super().__init__()
lowerCamelCase__: List[str] =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""")
lowerCamelCase__: List[str] =prefix_inner_dim
lowerCamelCase__: Union[str, Any] =prefix_hidden_dim
lowerCamelCase__: Tuple =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCamelCase__: Any =(
nn.Linear(self.prefix_hidden_dim , UpperCAmelCase_) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCamelCase__: Dict =GPTaConfig(
vocab_size=UpperCAmelCase_ , n_positions=UpperCAmelCase_ , n_embd=UpperCAmelCase_ , n_layer=UpperCAmelCase_ , n_head=UpperCAmelCase_ , n_inner=UpperCAmelCase_ , activation_function=UpperCAmelCase_ , resid_pdrop=UpperCAmelCase_ , embd_pdrop=UpperCAmelCase_ , attn_pdrop=UpperCAmelCase_ , layer_norm_epsilon=UpperCAmelCase_ , initializer_range=UpperCAmelCase_ , scale_attn_weights=UpperCAmelCase_ , use_cache=UpperCAmelCase_ , scale_attn_by_inverse_layer_idx=UpperCAmelCase_ , reorder_and_upcast_attn=UpperCAmelCase_ , )
lowerCamelCase__: int =GPTaLMHeadModel(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =self.transformer.transformer.wte(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.encode_prefix(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.decode_prefix(UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.cat((prefix_embeds, embedding_text) , dim=1)
if labels is not None:
lowerCamelCase__: List[Any] =self.get_dummy_token(input_ids.shape[0] , input_ids.device)
lowerCamelCase__: List[Any] =torch.cat((dummy_token, input_ids) , dim=1)
lowerCamelCase__: Tuple =self.transformer(inputs_embeds=UpperCAmelCase_ , labels=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : torch.device) ->torch.Tensor:
'''simple docstring'''
return torch.zeros(UpperCAmelCase_ , self.prefix_length , dtype=torch.intaa , device=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
return self.encode_prefix(UpperCAmelCase_)
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =torch.split(UpperCAmelCase_ , 1 , dim=0)
lowerCamelCase__: Dict =[]
lowerCamelCase__: List[Any] =[]
for feature in features:
lowerCamelCase__: Tuple =self.decode_prefix(feature.to(UpperCAmelCase_)) # back to the clip feature
# Only support beam search for now
lowerCamelCase__ , lowerCamelCase__: Tuple =self.generate_beam(
input_embeds=UpperCAmelCase_ , device=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
lowerCamelCase__: Union[str, Any] =torch.stack(UpperCAmelCase_)
lowerCamelCase__: List[Any] =torch.stack(UpperCAmelCase_)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int = 5 , UpperCAmelCase_ : int = 67 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[int] = None , ) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =eos_token_id
lowerCamelCase__: str =None
lowerCamelCase__: Optional[Any] =None
lowerCamelCase__: Any =torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=torch.int)
lowerCamelCase__: Any =torch.zeros(UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=torch.bool)
if input_embeds is not None:
lowerCamelCase__: Union[str, Any] =input_embeds
else:
lowerCamelCase__: Optional[int] =self.transformer.transformer.wte(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
lowerCamelCase__: Dict =self.transformer(inputs_embeds=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =outputs.logits
lowerCamelCase__: Dict =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCamelCase__: Dict =logits.softmax(-1).log()
if scores is None:
lowerCamelCase__ , lowerCamelCase__: str =logits.topk(UpperCAmelCase_ , -1)
lowerCamelCase__: List[str] =generated.expand(UpperCAmelCase_ , *generated.shape[1:])
lowerCamelCase__ , lowerCamelCase__: List[str] =next_tokens.permute(1 , 0), scores.squeeze(0)
if tokens is None:
lowerCamelCase__: Optional[int] =next_tokens
else:
lowerCamelCase__: Optional[Any] =tokens.expand(UpperCAmelCase_ , *tokens.shape[1:])
lowerCamelCase__: List[str] =torch.cat((tokens, next_tokens) , dim=1)
else:
lowerCamelCase__: Any =-float(np.inf)
lowerCamelCase__: Optional[Any] =0
lowerCamelCase__: Optional[int] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCamelCase__: int =scores_sum / seq_lengths[:, None]
lowerCamelCase__ , lowerCamelCase__: Any =scores_sum_average.view(-1).topk(UpperCAmelCase_ , -1)
lowerCamelCase__: List[Any] =next_tokens // scores_sum.shape[1]
lowerCamelCase__: str =seq_lengths[next_tokens_source]
lowerCamelCase__: Optional[int] =next_tokens % scores_sum.shape[1]
lowerCamelCase__: Optional[Any] =next_tokens.unsqueeze(1)
lowerCamelCase__: Dict =tokens[next_tokens_source]
lowerCamelCase__: Union[str, Any] =torch.cat((tokens, next_tokens) , dim=1)
lowerCamelCase__: List[Any] =generated[next_tokens_source]
lowerCamelCase__: List[str] =scores_sum_average * seq_lengths
lowerCamelCase__: Tuple =is_stopped[next_tokens_source]
lowerCamelCase__: Dict =self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
lowerCamelCase__: List[str] =torch.cat((generated, next_token_embed) , dim=1)
lowerCamelCase__: List[Any] =is_stopped + next_tokens.eq(UpperCAmelCase_).squeeze()
if is_stopped.all():
break
lowerCamelCase__: Dict =scores / seq_lengths
lowerCamelCase__: Dict =scores.argsort(descending=UpperCAmelCase_)
# tokens tensors are already padded to max_seq_length
lowerCamelCase__: Tuple =[tokens[i] for i in order]
lowerCamelCase__: List[Any] =torch.stack(UpperCAmelCase_ , dim=0)
lowerCamelCase__: Optional[Any] =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
return output_texts, seq_lengths
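# --- Hedged sketch (added): the length-normalised ranking inside `generate_beam`
# above. Candidates are scored by cumulative log-probability divided by sequence
# length, then the flat top-k index is split back into (source beam, token id).
import torch

scores = torch.tensor([-1.2, -3.0])                    # cumulative log-prob per beam
logits = torch.randn(2, 5).softmax(-1).log()           # next-token log-probs (vocab of 5)
seq_lengths = torch.tensor([4.0, 4.0])
scores_sum = scores[:, None] + logits                  # extend every beam by every token
scores_sum_average = scores_sum / seq_lengths[:, None]
values, flat_idx = scores_sum_average.view(-1).topk(2, -1)
beam_source = flat_idx // scores_sum.shape[1]          # which beam each candidate came from
token_id = flat_idx % scores_sum.shape[1]              # which token extends it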
| 10 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
    lowerCamelCase__: Dict ={int(k ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
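# --- Hedged sketch (added): the fused-QKV slicing performed above. Timm-style
# checkpoints store one (3 * hidden, hidden) projection; the HF layout wants
# separate query/key/value weights, recovered as contiguous row slices.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)   # fused qkv weight
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)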
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        lowerCamelCase__: Any =orig_state_dict.pop(key )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 1 |
def and_gate ( input_1 , input_2 ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate ( ) -> None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
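# --- Sketch (added): composing a NAND gate from the AND gate above; the helper
# name is illustrative.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))

assert [nand_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 1, 1, 0]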
| 10 |
from math import ceil, sqrt
def solution ( limit = 1000000 ) -> int:
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
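# --- Worked check (added): a square lamina of outer width w and hole width h uses
# w**2 - h**2 tiles; Project Euler 173 states that exactly forty-one laminae can
# be formed using up to one hundred tiles, which the function above reproduces.
assert solution(100) == 41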
| 10 | 1 |
def solution ( length = 50 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
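# --- Worked check (added): Project Euler 114's stated example, a row of length
# seven admitting exactly seventeen fill patterns, matches the DP above.
assert solution(7) == 17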
| 10 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
| 10 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["input_features", "attention_mask"]
def __init__(self : List[Any] , UpperCAmelCase_ : Dict=80 , UpperCAmelCase_ : Optional[Any]=16_000 , UpperCAmelCase_ : str=80 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Dict =num_mel_bins
lowerCamelCase__: int =do_ceptral_normalize
lowerCamelCase__: int =normalize_means
lowerCamelCase__: Optional[Any] =normalize_vars
lowerCamelCase__: Optional[int] =True
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : np.ndarray , ) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowerCamelCase__: Tuple =torch.from_numpy(UpperCAmelCase_).unsqueeze(0)
lowerCamelCase__: Any =ta_kaldi.fbank(UpperCAmelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : float = 0.0 , ) ->np.ndarray:
'''simple docstring'''
if normalize_means:
lowerCamelCase__: Tuple =x[:input_length].mean(axis=0)
lowerCamelCase__: List[Any] =np.subtract(UpperCAmelCase_ , UpperCAmelCase_)
if normalize_vars:
lowerCamelCase__: int =x[:input_length].std(axis=0)
lowerCamelCase__: List[str] =np.divide(UpperCAmelCase_ , UpperCAmelCase_)
if input_length < x.shape[0]:
lowerCamelCase__: List[Any] =padding_value
# make sure array is in float32
lowerCamelCase__: List[str] =x.astype(np.floataa)
return x
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : Optional[np.ndarray] = None) ->List[np.ndarray]:
'''simple docstring'''
lowerCamelCase__: Tuple =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase_ , UpperCAmelCase_ , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
def __call__(self : str , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : List[str] , ) ->BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
lowerCamelCase__: Optional[Any] =isinstance(UpperCAmelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
lowerCamelCase__: int =is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCamelCase__: str =[np.asarray(UpperCAmelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray):
lowerCamelCase__: str =np.asarray(UpperCAmelCase_ , dtype=np.floataa)
elif isinstance(UpperCAmelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCamelCase__: Union[str, Any] =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCamelCase__: List[str] =[raw_speech]
# extract fbank features
lowerCamelCase__: str =[self._extract_fbank_features(UpperCAmelCase_) for waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__: Any =BatchFeature({"input_features": features})
lowerCamelCase__: Union[str, Any] =self.pad(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
# make sure list is in array format
lowerCamelCase__: Optional[Any] =padded_inputs.get("input_features")
if isinstance(input_features[0] , UpperCAmelCase_):
lowerCamelCase__: Tuple =[np.asarray(UpperCAmelCase_ , dtype=np.floataa) for feature in input_features]
lowerCamelCase__: Union[str, Any] =padded_inputs.get("attention_mask")
if attention_mask is not None:
lowerCamelCase__: Optional[int] =[np.asarray(UpperCAmelCase_ , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCamelCase__: Any =(
np.array(UpperCAmelCase_ , dtype=np.intaa)
if self._get_padding_strategies(UpperCAmelCase_ , max_length=UpperCAmelCase_) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase__: Any =self.normalize(
padded_inputs["input_features"] , attention_mask=UpperCAmelCase_)
if return_tensors is not None:
lowerCamelCase__: str =padded_inputs.convert_to_tensors(UpperCAmelCase_)
return padded_inputs
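# --- Hedged usage sketch (added) for the Kaldi-fbank feature extractor above,
# loaded from a released checkpoint (assumed available locally or from the Hub).
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.zeros(16_000, dtype=np.float32)                 # 1 s of silence at 16 kHz
inputs = extractor(speech, sampling_rate=16_000, return_tensors="pt")
print(inputs["input_features"].shape)                       # expected (1, frames, 80 mel bins)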
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
def valid_coloring ( neighbours , colored_vertices , color ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color ( graph , max_colors , colored_vertices , index ) -> bool:
    """simple docstring"""
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color ( graph , max_colors ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
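# --- Usage sketch (added) for the backtracking colorer above: a triangle needs
# three colors, so two colors fail and three succeed.
triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]  # adjacency matrix
assert color(triangle, 2) == []
assert color(triangle, 3) == [0, 1, 2]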
| 10 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(UpperCAmelCase_)
def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
if "text_queries" in kwargs:
lowerCamelCase__: Any =kwargs.pop("text_queries")
if isinstance(UpperCAmelCase_ , (str, Image.Image)):
lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels}
else:
lowerCamelCase__: Any =image
lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
return results
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] ={}
if "threshold" in kwargs:
lowerCamelCase__: List[Any] =kwargs["threshold"]
if "top_k" in kwargs:
lowerCamelCase__: Any =kwargs["top_k"]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(inputs["image"])
lowerCamelCase__: Dict =inputs["candidate_labels"]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Any =candidate_labels.split(",")
lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(UpperCAmelCase_):
lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework)
lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework)
yield {
"is_last": i == len(UpperCAmelCase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =model_inputs.pop("target_size")
lowerCamelCase__: Dict =model_inputs.pop("candidate_label")
lowerCamelCase__: Dict =model_inputs.pop("is_last")
lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_)
lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =[]
for model_output in model_outputs:
lowerCamelCase__: Optional[Any] =model_output["candidate_label"]
lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_)
lowerCamelCase__: Dict =self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
lowerCamelCase__: Dict =outputs["scores"][index].item()
lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0])
lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box}
results.append(UpperCAmelCase_)
        lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda x: x["score"] , reverse=UpperCAmelCase_)
if top_k:
lowerCamelCase__: Dict =results[:top_k]
return results
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist()
lowerCamelCase__: Optional[int] ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
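# --- Hedged usage sketch (added) for the pipeline above, using a public OWL-ViT
# checkpoint commonly paired with it.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
print(preds[0]["label"], preds[0]["score"], preds[0]["box"])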
| 10 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        with open(resolved_file, "r") as f:
            config = json.load(f)
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
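# Hedged sketch of how a scheduler like the one tested above is driven in a
# denoising loop. The tensor shape and the zero "model" output are placeholders,
# not values from this test file.
def _example_ddpm_parallel_loop():
    import torch
    from diffusers import DDPMParallelScheduler

    scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for a real noise-prediction model
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample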
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Builds a random 10-element array and a random target for the timing run."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """
    Returns a triplet in the array whose sum equals the target, else (0, 0, 0).
    Brute force over all 3-permutations: O(n^3).
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Returns a triplet in the array whose sum equals the target, else (0, 0, 0).
    Sorts once, then uses a two-pointer scan: O(n^2).
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Times both implementations on the shared dataset with timeit.repeat."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f'The time for naive implementation is {times[0]}.')
print(f'The time for optimized implementation is {times[1]}.')
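# Hedged sanity check for the two implementations above: on a small input with
# a unique answer, both should return the sorted triplet (5, 7, 23) for target 35.
def _example_triplet_sum_agreement():
    arr = [13, 29, 7, 23, 5]
    assert triplet_sum1(arr[:], 35) == (5, 7, 23)
    assert triplet_sum2(arr[:], 35) == (5, 7, 23)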
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Checks that Prim's algorithm recovers a known minimum spanning tree."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
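# Hedged illustration of the algorithm the test above exercises: a minimal
# heap-based Prim's MST over the same {node: [[neighbor, cost], ...]} adjacency
# shape. This is a sketch, not the project's `prisms_algorithm` implementation.
def _example_prim(adjacency, start=0):
    import heapq

    visited = {start}
    heap = [(cost, start, neighbor) for neighbor, cost in adjacency[start]]
    heapq.heapify(heap)
    tree_edges = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v, cost))
        for neighbor, edge_cost in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (edge_cost, v, neighbor))
    return tree_edges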
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["▁he", "ll", "o"])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] ={"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}"""
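# Hedged illustration of the URL shape asserted above (repo and file names are
# placeholders): a path with a space is percent-encoded, so this resolves to
# "https://huggingface.co/datasets/org/dataset/resolve/v2/file%20name.csv".
def _example_hf_hub_url():
    return hf_hub_url(repo_id="org/dataset", path="file name.csv", revision="v2")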
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
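# Hedged invocation sketch for the converter above; the script filename and all
# paths are placeholders, not values taken from this file:
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted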
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,  # Good TPU/XLA memory alignment.
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
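# Hedged usage sketch: build a small CanineConfig; the reduced sizes here are
# illustrative, not defaults from this file.
def _example_canine_config():
    config = CanineConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4, intermediate_size=256)
    # A randomly initialized model could then be built with, e.g.:
    #   from transformers import CanineModel
    #   model = CanineModel(config)
    return config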
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Returns the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
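# Hedged worked example for viterbi() above: the classic two-state weather HMM.
# The probabilities are illustrative, not values from this file.
def _example_viterbi_weather():
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    initial = {"healthy": 0.6, "sick": 0.4}
    transitions = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emissions = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Expected most-likely state sequence: ["healthy", "healthy", "sick"]
    return viterbi(observations, states, initial, transitions, emissions)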
def solution(min_total: int = 10**12) -> int:
    """
    Project Euler problem 100: returns the number of blue discs in the first
    arrangement whose total number of discs exceeds min_total.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'{solution() = }')
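# Hedged brute-force cross-check of the arrangement property behind solution():
# find small disc counts n with b blue discs where P(two blue) = 1/2, i.e.
# 2*b*(b-1) == n*(n-1).
def _example_small_arrangements():
    hits = []
    for n in range(2, 200):
        for b in range(2, n):
            if 2 * b * (b - 1) == n * (n - 1):
                hits.append((n, b))
    return hits  # [(4, 3), (21, 15), (120, 85)]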
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
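# Hedged usage sketch: with the default conv strides above, the property
# computes 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per logit frame.
def _example_unispeech_config():
    config = UniSpeechConfig(num_hidden_layers=2)
    assert config.inputs_to_logits_ratio == 320
    return config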
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Project Euler problem 86: returns the least value of M such that the number
    of cuboids (a, b, c) with a <= b <= c <= M and an integer shortest surface
    path exceeds the limit.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
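# Hedged brute-force cross-check for small limits: count cuboids a <= b <= c <= m
# whose shortest surface path sqrt((a + b)^2 + c^2) is an integer. The problem
# statement gives exactly 2060 such cuboids for m = 100.
def _example_count_integer_path_cuboids(m: int) -> int:
    from math import sqrt

    count = 0
    for c in range(1, m + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count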
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of func (given as a string in the variable x) near the start value a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9712603092193604

    input_boxes = ((75, 275, 1725, 850),)

    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.8686015605926514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
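# Hedged invocation sketch for the SAM converter above; the script filename and
# output path are placeholders:
#
#   python convert_sam_original_to_hf_format.py \
#       --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base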
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(f'{solution() = }')
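# Hedged quick check for solution() above: the first few primes are
# 2, 3, 5, 7, 11, 13, so the sixth prime should be 13.
def _example_sixth_prime():
    assert solution(6) == 13
    return solution(6)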
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return the unknown token id if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
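    # Round trip of the two converters above: for an in-vocab piece p the id is
    # sp_model.PieceToId(p) + fairseq_offset and decoding subtracts the offset again;
    # out-of-vocab pieces hit PieceToId(...) == 0 and map to unk_token_id instead.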
    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 10 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        # intentionally a no-op override in this test suite
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 10 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDatasetsCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
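
# A minimal concrete subclass, sketched for illustration only (the command name and
# behaviour below are hypothetical, not part of the original file):
#
#     class EnvCommand(BaseDatasetsCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info would be printed here")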
| 10 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
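    # E.g. for a pair (A, B) the token type ids come out as
    # [0] * len([CLS] + A + [SEP]) followed by [1] * len(B + [SEP]),
    # the standard BERT sentence-pair segment encoding.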
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
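# With this layout, importing the package stays cheap: the heavy torch/TF modules named
# in _import_structure are only imported when one of the listed symbols is first
# accessed through the _LazyModule installed at the bottom of the file.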
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
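
# For example, with pad_token_id = 1 an input row [5, 9, 2, 1, 1] produces the
# attention mask [1, 1, 1, 0, 0]: real tokens are attended to, padding is masked out.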
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase__: Optional[Any] =input_ids.shape[0]
lowerCamelCase__: List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data()
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_)
lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest("JIT Enabled"):
lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
lowerCamelCase__: int ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("JIT Enabled"):
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id
lowerCamelCase__: str =model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_)
lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
lowerCamelCase__: Any =["Sam"]
lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax")
lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic."
lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 10 | 1 |
import itertools
import math


def is_prime(number: int) -> bool:
    """Deterministic primality test by trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to test
    # divisors of that shape up to sqrt(number)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'{solution() = }')
| 10 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
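
# For instance, a vocab file containing the two lines "[PAD]" and "[UNK]" loads as
# OrderedDict([("[PAD]", 0), ("[UNK]", 1)]): ids simply follow line order.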
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return the unknown token id if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 10 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (an expression in the variable x) by Newton's method, starting from the guess a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function: the value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find root of log(x) - 1 = 0 (i.e. the value of e)
    print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Find root of exp(x) - 1 = 0
    print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
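
# The update rule implemented above is Newton's iteration x_{n+1} = x_n - f(x_n) / f'(x_n);
# starting from x_0 = 2, the sin(x) run converges quadratically to pi ~= 3.14159265.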
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
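# Applied in order, these patterns turn e.g. `tfds.features.Text()` into
# `datasets.Value('string')` and `tf.io.gfile.GFile` into `open`.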
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset."
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 10 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
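
# The "*" in a mapped key stands for the layer index: e.g. the fairseq weight
# "encoder.layers.3.self_attn.k_proj" is routed to "encoder.layers.3.attention.k_proj"
# by the replace("*", layer_index) step in recursively_load_weights below.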
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq UniSpeechSat weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
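# Example invocation (script name and paths are illustrative placeholders; the
# flag names match the argparse setup above):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json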
| 10 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 10 | 1 |
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
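if __name__ == "__main__":
    # Minimal sanity check of the ring-buffer behaviour (illustrative addition,
    # not part of the original module).
    q = CircularQueue(3)
    q.enqueue(1).enqueue(2).enqueue(3)
    assert len(q) == 3
    assert q.dequeue() == 1  # FIFO: the oldest element leaves first
    q.enqueue(4)  # the rear index wraps around to the freed slot
    assert [q.dequeue() for _ in range(3)] == [2, 3, 4]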
| 10 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra fields that control weight pruning."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
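# Hedged usage sketch: the pruning-specific fields ride alongside the usual BERT
# hyperparameters, so a topK-pruning config could look like this (values
# illustrative, defaults shown above):
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)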
| 10 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__A = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__A = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__A = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__A = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__A = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns) -> str:
    """Apply each (tf_name, hf_name) replacement in order to a TF variable name."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
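# Worked example (hypothetical key): with DECODER_PATTERNS, the TF variable
# "pegasus/decoder/layer_0/attention/self/query/kernel" is rewritten step by
# step ("/" -> ".", "layer_" -> "layers.", "kernel" -> "weight", "pegasus" ->
# "model", "attention.self" -> "self_attn", "query" -> "q_proj") into
# "model.decoder.layers.0.self_attn.q_proj.weight".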
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """Read every variable from a TF checkpoint into a {name: numpy array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__A = parser.parse_args()
__A = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
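# Example invocation (script name and paths illustrative). config_update is left
# empty above, so the stock BigBirdPegasusConfig defaults are used:
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird-pegasus/model.ckpt \
#       --save_dir ./bigbird-pegasus-hf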
| 10 |
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 10 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Wraps a SamImageProcessor and prepares point/box/label prompts for SAM."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
def __call__(self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->BatchEncoding:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # pop arguments that are not used in the forward pass but are needed afterwards
lowerCamelCase__: Tuple =encoding_image_processor["original_sizes"]
if hasattr(UpperCAmelCase_ , "numpy"): # Checks if Torch or TF tensor
lowerCamelCase__: Optional[Any] =original_sizes.numpy()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._check_and_preprocess_points(
input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , input_boxes=UpperCAmelCase_ , )
lowerCamelCase__: Optional[int] =self._normalize_and_convert(
UpperCAmelCase_ , UpperCAmelCase_ , input_points=UpperCAmelCase_ , input_labels=UpperCAmelCase_ , input_boxes=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , )
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Tuple="pt" , ) ->List[str]:
'''simple docstring'''
if input_points is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
lowerCamelCase__: int =[
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , original_sizes[0]) for point in input_points
]
else:
lowerCamelCase__: Tuple =[
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , UpperCAmelCase_)
for point, original_size in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self._pad_points_and_labels(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.array(UpperCAmelCase_)
if input_labels is not None:
lowerCamelCase__: Tuple =np.array(UpperCAmelCase_)
if input_boxes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =[
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , original_sizes[0] , is_bounding_box=UpperCAmelCase_)
for box in input_boxes
]
else:
lowerCamelCase__: List[Any] =[
self._normalize_coordinates(self.target_size , UpperCAmelCase_ , UpperCAmelCase_ , is_bounding_box=UpperCAmelCase_)
for box, original_size in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
lowerCamelCase__: Optional[int] =np.array(UpperCAmelCase_)
if input_boxes is not None:
if return_tensors == "pt":
lowerCamelCase__: int =torch.from_numpy(UpperCAmelCase_)
# boxes batch size of 1 by default
lowerCamelCase__: int =input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowerCamelCase__: Tuple =tf.convert_to_tensor(UpperCAmelCase_)
# boxes batch size of 1 by default
lowerCamelCase__: Optional[int] =tf.expand_dims(UpperCAmelCase_ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowerCamelCase__: Optional[Any] =torch.from_numpy(UpperCAmelCase_)
# point batch size of 1 by default
lowerCamelCase__: List[str] =input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowerCamelCase__: Tuple =tf.convert_to_tensor(UpperCAmelCase_)
# point batch size of 1 by default
lowerCamelCase__: Union[str, Any] =tf.expand_dims(UpperCAmelCase_ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"input_points": input_points})
if input_labels is not None:
if return_tensors == "pt":
lowerCamelCase__: Optional[int] =torch.from_numpy(UpperCAmelCase_)
# point batch size of 1 by default
lowerCamelCase__: Dict =input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowerCamelCase__: Union[str, Any] =tf.convert_to_tensor(UpperCAmelCase_)
# point batch size of 1 by default
lowerCamelCase__: Optional[int] =tf.expand_dims(UpperCAmelCase_ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels})
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: int =max([point.shape[0] for point in input_points])
lowerCamelCase__: Optional[int] =[]
for i, point in enumerate(UpperCAmelCase_):
if point.shape[0] != expected_nb_points:
lowerCamelCase__: int =np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowerCamelCase__: Dict =np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =processed_input_points
return input_points, input_labels
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict=False) ->np.ndarray:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str =original_size
lowerCamelCase__ , lowerCamelCase__: str =self.image_processor._get_preprocess_shape(UpperCAmelCase_ , longest_edge=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =deepcopy(UpperCAmelCase_).astype(UpperCAmelCase_)
if is_bounding_box:
lowerCamelCase__: Optional[int] =coords.reshape(-1 , 2 , 2)
lowerCamelCase__: Any =coords[..., 0] * (new_w / old_w)
lowerCamelCase__: Any =coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCamelCase__: str =coords.reshape(-1 , 4)
return coords
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , ) ->Optional[Any]:
'''simple docstring'''
if input_points is not None:
if hasattr(UpperCAmelCase_ , "numpy"): # Checks for TF or Torch tensor
lowerCamelCase__: List[str] =input_points.numpy().tolist()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or not isinstance(input_points[0] , UpperCAmelCase_):
raise ValueError("Input points must be a list of list of floating points.")
lowerCamelCase__: Dict =[np.array(UpperCAmelCase_) for input_point in input_points]
else:
lowerCamelCase__: List[str] =None
if input_labels is not None:
if hasattr(UpperCAmelCase_ , "numpy"):
lowerCamelCase__: str =input_labels.numpy().tolist()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or not isinstance(input_labels[0] , UpperCAmelCase_):
raise ValueError("Input labels must be a list of list integers.")
lowerCamelCase__: Tuple =[np.array(UpperCAmelCase_) for label in input_labels]
else:
lowerCamelCase__: Optional[Any] =None
if input_boxes is not None:
if hasattr(UpperCAmelCase_ , "numpy"):
lowerCamelCase__: str =input_boxes.numpy().tolist()
if (
not isinstance(UpperCAmelCase_ , UpperCAmelCase_)
or not isinstance(input_boxes[0] , UpperCAmelCase_)
or not isinstance(input_boxes[0][0] , UpperCAmelCase_)
):
raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
else:
lowerCamelCase__: Any =None
return input_points, input_labels, input_boxes
@property
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.image_processor.model_input_names
return list(dict.fromkeys(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str) ->Tuple:
'''simple docstring'''
return self.image_processor.post_process_masks(*UpperCAmelCase_ , **UpperCAmelCase_)
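# Sketch of the rescaling done by _normalize_coordinates above (numbers
# illustrative): for a 500x1000 image and a longest-edge target of 1024,
# _get_preprocess_shape returns (512, 1024), so a prompt point (x=100, y=50) is
# scaled by new_w/old_w = 1024/1000 and new_h/old_h = 512/500 to roughly
# (102.4, 51.2) in the resized frame.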
| 10 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Map a timm/YOLOS parameter name onto the transformers naming scheme."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
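# Worked example of the qkv split above (shapes illustrative): for hidden size H,
# timm stores one fused "blocks.<i>.attn.qkv.weight" of shape (3H, H); rows
# [0:H], [H:2H] and [2H:3H] become the separate query, key and value weights.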
def prepare_img() -> "Image.Image":
    """Download the standard COCO test image of two cats on a couch."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original YOLOS weights into the transformers design."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 1 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral triangles with integral side
    lengths and area whose perimeter does not exceed ``max_perimeter``."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
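# The loop above walks a Pell-like recurrence that enumerates the
# almost-equilateral triangles (sides n, n, n +/- 1) with integral area; the
# i % 2 branch alternates between the "+1" and "-1" families. Sketch of a quick
# check against the small cases: the first two such triangles are 5-5-6
# (perimeter 16) and 17-17-16 (perimeter 50), so solution(50) == 66 should hold.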
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the distinct square laminae that can be formed using up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
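# Sanity check from the Project Euler 173 statement: with at most one hundred
# tiles, exactly forty-one different square laminae can be formed, i.e.
# solution(100) == 41.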
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
__A = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
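# Small illustration of the backwards-compatibility branch above: passing the
# legacy string form
#   DebertaV2Config(pos_att_type="p2c|c2p").pos_att_type
# yields the list ["p2c", "c2p"].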
| 10 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
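# Minimal round-trip sketch using the same reader/writer classes the tests
# exercise (paths illustrative):
#
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()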
| 10 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python Euclidean distance."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
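# Worked example: for (1, 2, 3) and (4, 5, 6) every coordinate differs by 3, so
# both implementations return sqrt(3 * 3**2) = sqrt(27) ~= 5.196.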
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
benchmark()
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase_ ( __a , __a = "cpu" , __a = None ) -> None:
"""simple docstring"""
lowerCamelCase__: int =torch.load(__a , map_location=__a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__a , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowerCamelCase__: Union[str, Any] =v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__: List[str] =src_path
torch.save(__a , __a )
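# Example invocation via the fire CLI below (file names illustrative); halving
# fp32 tensors roughly halves the checkpoint size on disk:
#
#   python convert_to_half.py pytorch_model.bin --save_path pytorch_model_fp16.bin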
if __name__ == "__main__":
fire.Fire(convert)
| 10 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
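# Hedged usage sketch (checkpoint name illustrative; any zero-shot object
# detection model on the Hub should work):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#       threshold=0.1,
#   )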
| 10 | 1 |
__A = "Input must be a string of 8 numbers plus letter"
__A = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCAmelCase_ ( __a ) -> bool:
"""simple docstring"""
if not isinstance(__a , __a ):
lowerCamelCase__: Union[str, Any] =F"""Expected string as input, found {type(__a ).__name__}"""
raise TypeError(__a )
lowerCamelCase__: Any =spanish_id.replace("-" , "" ).upper()
if len(__a ) != 9:
raise ValueError(__a )
try:
lowerCamelCase__: Any =int(spanish_id_clean[0:8] )
lowerCamelCase__: Dict =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__a ) from ex
if letter.isdigit():
raise ValueError(__a )
return letter == LOOKUP_LETTERS[number % 23]
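# Worked example: for "12345678Z", 12345678 % 23 == 14 and LOOKUP_LETTERS[14]
# is "Z", so is_spain_national_id("12345678Z") returns True.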
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
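    # The three variance probes above bracket the linear beta schedule used in
    # get_scheduler_config: the posterior variance is ~0 at t=0 and approaches
    # beta_end (0.02) at t=999; 0.00979 is the fixed_small value the scheduler
    # yields mid-schedule (t=487) for these defaults.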
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase_ , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
| 10 | 1 |
from __future__ import annotations
__A = [
[-1, 0], # up
[0, -1], # left
[1, 0], # down
[0, 1], # right
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
lowerCamelCase__: List[str] =[
[0 for col in range(len(grid[0] ) )] for row in range(len(__a ) )
] # the reference grid
lowerCamelCase__: Optional[int] =1
lowerCamelCase__: int =[
[0 for col in range(len(grid[0] ) )] for row in range(len(__a ) )
] # the action grid
lowerCamelCase__: Dict =init[0]
lowerCamelCase__: Dict =init[1]
lowerCamelCase__: List[str] =0
lowerCamelCase__: int =g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
lowerCamelCase__: Optional[int] =[[f, g, x, y]]
lowerCamelCase__: Tuple =False # flag that is set when search is complete
lowerCamelCase__: Union[str, Any] =False # flag set if no cell is left to expand
while not found and not resign:
if len(__a ) == 0:
raise ValueError("Algorithm is unable to find a solution" )
else: # choose the lowest-cost open cell so as to move closer to the goal
cell.sort()
cell.reverse()
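# sorting ascending and then reversing lets pop() retrieve the open cell with the
# smallest f value, a simple stand-in for a priority queue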
lowerCamelCase__: int =cell.pop()
lowerCamelCase__: List[str] =next_cell[2]
lowerCamelCase__: List[Any] =next_cell[3]
lowerCamelCase__: Dict =next_cell[1]
if x == goal[0] and y == goal[1]:
lowerCamelCase__: Union[str, Any] =True
else:
for i in range(len(__a ) ): # to try out different valid actions
lowerCamelCase__: Any =x + DIRECTIONS[i][0]
lowerCamelCase__: List[Any] =y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__a ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
lowerCamelCase__: Optional[int] =g + cost
lowerCamelCase__: Optional[int] =ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
lowerCamelCase__: int =1
lowerCamelCase__: Any =i
lowerCamelCase__: Tuple =[]
lowerCamelCase__: Any =goal[0]
lowerCamelCase__: Union[str, Any] =goal[1]
invpath.append([x, y] ) # build the path backwards from the goal
while x != init[0] or y != init[1]:
lowerCamelCase__: Any =x - DIRECTIONS[action[x][y]][0]
lowerCamelCase__: int =y - DIRECTIONS[action[x][y]][1]
lowerCamelCase__: Any =xa
lowerCamelCase__: int =ya
invpath.append([x, y] )
lowerCamelCase__: str =[]
for i in range(len(__a ) ):
path.append(invpath[len(__a ) - 1 - i] )
return path, action
if __name__ == "__main__":
__A = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__A = [0, 0]
# all coordinates are given in the format [y, x]
__A = [len(grid) - 1, len(grid[0]) - 1]
__A = 1
# the cost map which pushes the path closer to the goal
__A = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__A = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__A = 99
__A , __A = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 10 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: int =9, 14 # noqa: F841
lowerCamelCase__: List[Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase__: List[str] =defaultdict(__a )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
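# record each undirected edge in both directions so Prim's algorithm can grow the tree from either endpoint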
lowerCamelCase__: List[str] =mst(__a )
lowerCamelCase__: Union[str, Any] =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase__: Optional[int] =tuple(answer[:2] )
lowerCamelCase__: List[Any] =tuple(edge[::-1] )
assert edge in result or reverse in result
| 10 | 1 |
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
if len(__a ) != len(__a ):
raise ValueError("String lengths must match!" )
lowerCamelCase__: Dict =0
for chara, chara in zip(__a , __a ):
if chara != chara:
count += 1
return count
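# Example (assumed inputs): "karolin" vs "kathrin" differ at three aligned positions,
# so their Hamming distance is 3.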
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
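# the monolingual vocab file stores one "token index" pair per line; BartphoTokenizer reads
# it back as the reduced Vietnamese-specific vocabulary (format assumed from this fixture)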
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 10 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCAmelCase_ ( __a , __a , __a , __a = 100 , ) -> float:
"""simple docstring"""
lowerCamelCase__: List[str] =x_start
lowerCamelCase__: List[str] =fnc(__a )
lowerCamelCase__: Union[str, Any] =0.0
for _ in range(__a ):
# Approximates curve as a sequence of linear lines and sums their length
lowerCamelCase__: int =(x_end - x_start) / steps + xa
lowerCamelCase__: Tuple =fnc(__a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowerCamelCase__: Optional[Any] =xa
lowerCamelCase__: List[str] =fxa
return length
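# A quick sanity check (assumed inputs): the arc length of f(x) = x over [0, 1]
# should approach sqrt(2) ~ 1.41421 as the step count grows.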
if __name__ == "__main__":
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__A = 10
while i <= 10_0000:
print(f'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
| 10 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
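# the keys above live at the top level of the HF model rather than under the "unispeech."
# prefix, so the renaming loop below must not prepend that prefix to them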
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | 1 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def lowerCAmelCase_ ( __a , __a ) -> Dict:
"""simple docstring"""
return (-y * np.log(__a ) - (1 - y) * np.log(1 - h )).mean()
def lowerCAmelCase_ ( __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =np.dot(__a , __a )
return np.sum(y * scores - np.log(1 + np.exp(__a ) ) )
def lowerCAmelCase_ ( __a , __a , __a , __a=70000 ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =np.zeros(x.shape[1] )
for iterations in range(__a ):
lowerCamelCase__: Tuple =np.dot(__a , __a )
lowerCamelCase__: Union[str, Any] =sigmoid_function(__a )
lowerCamelCase__: Union[str, Any] =np.dot(x.T , h - y ) / y.size
lowerCamelCase__: Optional[Any] =theta - alpha * gradient # updating the weights
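# batch gradient descent update: theta <- theta - alpha * X^T (h - y) / m, with h = sigmoid(X theta)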
lowerCamelCase__: List[str] =np.dot(__a , __a )
lowerCamelCase__: List[str] =sigmoid_function(__a )
lowerCamelCase__: str =cost_function(__a , __a )
if iterations % 100 == 0:
print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
__A = datasets.load_iris()
__A = iris.data[:, :2]
__A = (iris.target != 0) * 1
__A = 0.1
__A = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("theta: ", theta) # printing the theta i.e our weights vector
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
return sigmoid_function(
np.dot(__a , __a ) ) # predicts the class probability using the trained logistic regression weights
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((__A) , (__A)) = (x[:, 0].min(), x[:, 0].max())
((__A) , (__A)) = (x[:, 1].min(), x[:, 1].max())
((__A) , (__A)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__A = np.c_[xxa.ravel(), xxa.ravel()]
__A = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 10 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
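# A minimal usage sketch (hypothetical inputs; `viterbi` stands for the function above,
# which implements the standard Viterbi recursion). This is the classic healthy/fever example:
# viterbi(
#     ["normal", "cold", "dizzy"],
#     ["Healthy", "Fever"],
#     {"Healthy": 0.6, "Fever": 0.4},
#     {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}},
#     {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#      "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}},
# )
# is expected to return ["Healthy", "Healthy", "Fever"].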
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = GPTSanJapaneseTokenizer
lowercase_ = False
lowercase_ = {"do_clean_text": False, "add_prefix_space": False}
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
super().setUp()
# fmt: off
lowerCamelCase__: List[str] =["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase__: Dict ={"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
lowerCamelCase__: List[str] ={"unk_token": "<unk>"}
lowerCamelCase__: int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.emoji_file , "w") as emoji_writer:
emoji_writer.write(json.dumps(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ="こんにちは、世界。 \nこんばんは、㔺界。😀"
lowerCamelCase__: List[Any] ="こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.get_input_output_texts(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_)
return text, ids
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->int:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.get_tokenizer()
# Testing tokenization
lowerCamelCase__: int ="こんにちは、世界。 こんばんは、㔺界。"
lowerCamelCase__: Optional[Any] =["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
lowerCamelCase__: Optional[Any] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Testing conversion to ids without special tokens
lowerCamelCase__: Optional[Any] =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCamelCase__: Tuple =tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Testing conversion to ids with special tokens
lowerCamelCase__: Union[str, Any] =tokens + [tokenizer.unk_token]
lowerCamelCase__: Optional[int] =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCamelCase__: Tuple =tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.get_tokenizer()
# Testing tokenization
lowerCamelCase__: List[Any] ="こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
lowerCamelCase__: List[str] ="こんにちは、、、、世界。こんばんは、、、、世界。"
lowerCamelCase__: Union[str, Any] =tokenizer.encode(UpperCAmelCase_)
lowerCamelCase__: Dict =tokenizer.decode(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
# Testing tokenization
lowerCamelCase__: Any ="こんにちは、世界。"
lowerCamelCase__: Tuple ="こんばんは、㔺界。😀"
lowerCamelCase__: List[str] ="こんにちは、世界。こんばんは、世界。😀"
lowerCamelCase__: Dict =tokenizer.encode(prefix_text + input_text)
lowerCamelCase__: List[Any] =tokenizer.encode("" , prefix_text=prefix_text + input_text)
lowerCamelCase__: List[str] =tokenizer.encode(UpperCAmelCase_ , prefix_text=UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer.decode(UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer.decode(UpperCAmelCase_)
lowerCamelCase__: Any =tokenizer.decode(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] =self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
# Testing tokenization
lowerCamelCase__: List[str] ="こんにちは、世界。"
lowerCamelCase__: Dict ="こんばんは、㔺界。😀"
lowerCamelCase__: List[str] =len(tokenizer.encode(UpperCAmelCase_)) - 2
lowerCamelCase__: int =len(tokenizer.encode(UpperCAmelCase_)) - 2
lowerCamelCase__: List[Any] =[1] + [0] * (len_prefix + len_text + 1)
lowerCamelCase__: int =[1] * (len_prefix + len_text + 1) + [0]
lowerCamelCase__: Union[str, Any] =[1] + [1] * (len_prefix) + [0] * (len_text + 1)
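# GPT-SAN is a prefix language model: token_type_ids mark the prefix span that attends
# bidirectionally, and the three variants above only move that prefix/generation boundary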
lowerCamelCase__: Optional[int] =tokenizer(prefix_text + input_text).token_type_ids
lowerCamelCase__: List[str] =tokenizer("" , prefix_text=prefix_text + input_text).token_type_ids
lowerCamelCase__: List[Any] =tokenizer(UpperCAmelCase_ , prefix_text=UpperCAmelCase_).token_type_ids
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
lowerCamelCase__: List[Any] =tokenizer.encode("あンいワ")
lowerCamelCase__: Optional[int] =tokenizer.encode("" , prefix_text="あンいワ")
lowerCamelCase__: List[Any] =tokenizer.encode("いワ" , prefix_text="あン")
self.assertEqual(tokenizer.decode(UpperCAmelCase_) , tokenizer.decode(UpperCAmelCase_))
self.assertEqual(tokenizer.decode(UpperCAmelCase_) , tokenizer.decode(UpperCAmelCase_))
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
lowerCamelCase__: List[Any] =[["武田信玄", "は、"], ["織田信長", "の配下の、"]]
lowerCamelCase__: Union[str, Any] =tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer.batch_encode_plus(UpperCAmelCase_ , padding=UpperCAmelCase_)
# fmt: off
lowerCamelCase__: Any =[[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
lowerCamelCase__: Optional[Any] =[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCamelCase__: List[Any] =[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCAmelCase_)
self.assertListEqual(x_token.token_type_ids , UpperCAmelCase_)
self.assertListEqual(x_token.attention_mask , UpperCAmelCase_)
self.assertListEqual(x_token_a.input_ids , UpperCAmelCase_)
self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase_)
self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict:
'''simple docstring'''
pass
| 10 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 10 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 10 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float:
"""simple docstring"""
lowerCamelCase__: str =a
while True:
lowerCamelCase__: Optional[Any] =Decimal(__a ) - (
Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__a ) ) < precision: # noqa: S307
return float(__a )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find the root of log(x) - 1 = 0, i.e. the value of e
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
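# Expected approximate outputs: pi (~3.14159) for sin(x) = 0 started at 2,
# (5 - sqrt(17)) / 2 (~0.4384) for the polynomial, e (~2.71828) for log(x) - 1 = 0,
# and 0 for exp(x) - 1 = 0.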
| 10 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__A = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DPTFeatureExtractor"]
__A = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
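# at type-checking time the concrete classes are imported below; at runtime the else-branch
# replaces this module with a _LazyModule that defers the heavy imports until first access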
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import itertools
import math
def lowerCAmelCase_ ( __a ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1, so only such candidates need testing
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
lowerCamelCase__: Optional[int] =2
while True:
if is_prime(__a ):
yield num
num += 1
def lowerCAmelCase_ ( __a = 10001 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , __a ) )
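# Project Euler problem 7: with the default nth=10001 this returns 104743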
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
from typing import Any
def lowerCAmelCase_ ( __a ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
lowerCamelCase__: int =[input_list.count(__a ) for value in input_list]
lowerCamelCase__: int =max(__a ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(__a ) if value == y} )
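# Intended behaviour (assumed examples; `mode` stands for the function above):
# mode([2, 2, 3]) -> [2]; mode([1, 2, 2, 3, 3]) -> [2, 3]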
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
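# crop_pct mimics timm's eval transform: the image is first resized to size / crop_pct,
# then center-cropped to crop_size (behaviour assumed from PoolFormerImageProcessor)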
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 10 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[int] =tmp_path / "file.csv"
lowerCamelCase__: Optional[int] =textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(__a , "w" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: str =tmp_path / "malformed_file.csv"
    lowerCamelCase__: List[Any] =textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
with open(__a , "w" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCAmelCase_ ( __a , __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path / "csv_with_image.csv"
lowerCamelCase__: str =textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(__a , "w" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCAmelCase_ ( __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path / "csv_with_label.csv"
    lowerCamelCase__: Optional[Any] =textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
with open(__a , "w" ) as f:
f.write(__a )
return str(__a )
@pytest.fixture
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Dict =tmp_path / "csv_with_int_list.csv"
    lowerCamelCase__: str =textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
with open(__a , "w" ) as f:
f.write(__a )
return str(__a )
def lowerCAmelCase_ ( __a , __a , __a ) -> str:
"""simple docstring"""
lowerCamelCase__: List[Any] =Csv()
lowerCamelCase__: Union[str, Any] =csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(__a ) in record.message
for record in caplog.records )
@require_pil
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
with open(__a , encoding="utf-8" ) as f:
lowerCamelCase__: Optional[Any] =f.read().splitlines()[1]
lowerCamelCase__: Any =Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
lowerCamelCase__: int =csv._generate_tables([[csv_file_with_image]] )
lowerCamelCase__: Tuple =pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
lowerCamelCase__: List[str] =pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
with open(__a , encoding="utf-8" ) as f:
lowerCamelCase__: str =f.read().splitlines()[1:]
lowerCamelCase__: Any =Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
lowerCamelCase__: Optional[int] =csv._generate_tables([[csv_file_with_label]] )
lowerCamelCase__: Dict =pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
lowerCamelCase__: Optional[int] =pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
    lowerCamelCase__: List[Any] =Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
lowerCamelCase__: Tuple =csv._generate_tables([[csv_file_with_int_list]] )
lowerCamelCase__: Dict =pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
lowerCamelCase__: Any =pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 10 |
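# The int_list fixture above relies on pandas' `converters` hook, which the Csv builder
# forwards to pandas.read_csv: the callable runs on each raw cell before type inference.
# A self-contained sketch with inline data (not the test fixture itself):
import io

import pandas as pd

frame = pd.read_csv(
    io.StringIO("int_list\n1 2 3\n4 5 6\n"),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
assert frame["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]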
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="[UNK]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : Optional[Any]="[MASK]" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : str , ) ->List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase_) != tokenize_chinese_chars
):
lowerCamelCase__: Dict =getattr(UpperCAmelCase_ , normalizer_state.pop("type"))
lowerCamelCase__: int =do_lower_case
lowerCamelCase__: int =strip_accents
lowerCamelCase__: List[str] =tokenize_chinese_chars
lowerCamelCase__: Tuple =normalizer_class(**UpperCAmelCase_)
lowerCamelCase__: Any =do_lower_case
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=None) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =[self.sep_token_id]
lowerCamelCase__: Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
lowerCamelCase__: Tuple =self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
| 10 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if gpta_config_file == "":
lowerCamelCase__: Optional[Any] =GPTaConfig()
else:
lowerCamelCase__: Optional[Any] =GPTaConfig.from_json_file(__a )
lowerCamelCase__: Any =GPTaModel(__a )
# Load weights from numpy
load_tf_weights_in_gpta(__a , __a , __a )
# Save pytorch-model
lowerCamelCase__: Any =pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCamelCase__: Optional[Any] =pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , __a )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 10 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__: Optional[Any] =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCamelCase__: Dict =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCamelCase__: Optional[Any] =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__: Any =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__: List[str] =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: List[str] =batch_size
lowerCamelCase__: Optional[int] =seq_length
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: str =use_labels
lowerCamelCase__: Optional[Any] =vocab_size
lowerCamelCase__: int =hidden_size
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: int =hidden_act
lowerCamelCase__: Tuple =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Optional[int] =max_position_embeddings
lowerCamelCase__: int =eos_token_id
lowerCamelCase__: Union[str, Any] =pad_token_id
lowerCamelCase__: List[str] =bos_token_id
lowerCamelCase__: int =initializer_range
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowerCamelCase__: int =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: Dict =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , )
lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =20
lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: str =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: List[Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: Union[str, Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: Dict =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =20
lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: List[Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: str =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_)
lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase__: Optional[Any] =input_ids.shape[0]
lowerCamelCase__: List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data()
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_)
lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = True
lowercase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =FlaxBlenderbotModelTester(self)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest("JIT Enabled"):
lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
lowerCamelCase__: int ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("JIT Enabled"):
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id
lowerCamelCase__: str =model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_)
lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
lowerCamelCase__: Any =["Sam"]
lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax")
lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic."
lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 10 | 1 |
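# `shift_tokens_right`, used throughout the tests above, prepends the decoder start
# token and drops the last position, so the decoder predicts token t from tokens < t.
# A plain-numpy re-implementation for illustration (the library version works the same
# way, including remapping the -100 loss-mask value to pad_token_id):
import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]          # drop last token, shift right by one
    shifted[:, 0] = decoder_start_token_id      # decoder always starts from this id
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 2, 1]])
assert shift_tokens_right_sketch(ids, pad_token_id=1, decoder_start_token_id=2).tolist() == [[2, 71, 82, 18, 2]]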
import math
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
    if not isinstance(__a , int ):
lowerCamelCase__: str =F"""Input value of [number={number}] must be an integer"""
raise TypeError(__a )
if number < 1:
lowerCamelCase__: List[str] =F"""Input value of [number={number}] must be > 0"""
raise ValueError(__a )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCamelCase__: Optional[int] =int(math.log(number // 3 , 2 ) ) + 2
lowerCamelCase__: int =[3, 5]
lowerCamelCase__: int =2
lowerCamelCase__: Tuple =3
for block in range(1 , __a ):
for _ in range(__a ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__A = 0
try:
__A = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 10 |
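# Sanity values for the generator above: Proth numbers are k * 2**n + 1 with odd k and
# k < 2**n. A brute-force cross-check, independent of the block/increment bookkeeping
# the function uses, confirms the expected start of the sequence:
def proth_bruteforce(count: int) -> list:
    found = set()
    for n in range(1, 12):
        for k in range(1, 2**n, 2):   # odd k below 2**n
            found.add(k * 2**n + 1)
    return sorted(found)[:count]

assert proth_bruteforce(8) == [3, 5, 9, 13, 17, 25, 33, 41]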
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "prophetnet.tokenizer"}
__A = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__A = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
lowerCamelCase__: int =reader.readlines()
for index, token in enumerate(__a ):
lowerCamelCase__: List[str] =token.rstrip("\n" )
lowerCamelCase__: List[Any] =index
return vocab
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : List[Any]="[SEP]" , UpperCAmelCase_ : Optional[Any]="[SEP]" , UpperCAmelCase_ : int="[UNK]" , UpperCAmelCase_ : Optional[Any]="[PAD]" , UpperCAmelCase_ : Dict="[CLS]" , UpperCAmelCase_ : Dict="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ) ->None:
'''simple docstring'''
lowerCamelCase__: int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
lowerCamelCase__: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: Optional[int] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase__: Optional[int] ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
lowerCamelCase__: Optional[int] =F"""[unused{i}]"""
lowerCamelCase__: int =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase__: int =12
lowerCamelCase__: Optional[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase_)
def __getstate__(self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.__dict__.copy()
lowerCamelCase__: Dict =None
return state
def __setstate__(self : List[str] , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_)) + [1]
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Any =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
        lowerCamelCase__: str ={self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: str =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Any]) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: List[str] =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: Dict =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase__: Union[str, Any] =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 10 | 1 |
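# The offset bookkeeping above in isolation: ids 0-4 hold the special tokens, 5-14 the
# ten [unusedN] slots, so the first real piece ("," -- id 3 in the spm model, which
# reserves 0-2 for <unk>/<s>/</s>) lands at embedding id 15, hence the offset of 12.
# Toy arithmetic only; no tokenizer is loaded here:
fairseq_offset = 15 - 3
assert fairseq_offset == 12
spm_id_of_comma = 3
assert spm_id_of_comma + fairseq_offset == 15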
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
lowerCamelCase__: Dict =get_activation("gelu")
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase_) , torch_builtin(UpperCAmelCase_)))
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase_) , gelu_new(UpperCAmelCase_)))
def SCREAMING_SNAKE_CASE_ (self : str) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
lowerCamelCase__: str =get_activation("gelu")
lowerCamelCase__: Union[str, Any] =get_activation("gelu_10")
lowerCamelCase__: Dict =torch_builtin(UpperCAmelCase_)
lowerCamelCase__: Any =geluaa(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(UpperCAmelCase_).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
with self.assertRaises(UpperCAmelCase_):
get_activation("bogus")
with self.assertRaises(UpperCAmelCase_):
get_activation(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =get_activation("gelu")
lowerCamelCase__: str =1
lowerCamelCase__: Union[str, Any] =get_activation("gelu")
self.assertEqual(acta.a , 1)
with self.assertRaises(UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =acta.a
| 10 |
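# The gelu variants compared above all approximate x * Phi(x). A standalone check of
# the exact erf form against the tanh approximation that gelu_new uses (these are the
# standard formulas, not read out of the test):
import math

def gelu_exact(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x: float) -> float:
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

assert abs(gelu_exact(0.5) - gelu_tanh(0.5)) < 1e-3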
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
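# The _LazyModule pattern above keeps `import transformers.models.ibert` cheap: heavy
# submodules load only when an attribute is first touched. A bare-bones sketch of the
# same idea via PEP 562 module-level __getattr__ (illustrative only, not the
# transformers implementation):
import importlib

_import_structure = {"configuration_ibert": ["IBertConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)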
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: int =9, 14 # noqa: F841
lowerCamelCase__: List[Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase__: List[str] =defaultdict(__a )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase__: List[str] =mst(__a )
lowerCamelCase__: Union[str, Any] =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase__: Optional[int] =tuple(answer[:2] )
lowerCamelCase__: List[Any] =tuple(edge[::-1] )
assert edge in result or reverse in result
| 10 |
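# The test above stores the undirected graph by inserting every edge in both
# directions, which is what lets Prim's algorithm reach neighbors from either
# endpoint. The same convention in isolation, on a hypothetical 3-node graph:
from collections import defaultdict

adjacency = defaultdict(list)
for nodea, nodeb, cost in [(0, 1, 4), (1, 2, 8)]:
    adjacency[nodea].append([nodeb, cost])
    adjacency[nodeb].append([nodea, cost])
assert adjacency[1] == [[0, 4], [2, 8]]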
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import os
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =len(grid[0] )
lowerCamelCase__: List[Any] =len(__a )
lowerCamelCase__: str =0
lowerCamelCase__: Tuple =0
lowerCamelCase__: Optional[int] =0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__a ):
for j in range(n_rows - 3 ):
lowerCamelCase__: Any =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowerCamelCase__: List[Any] =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowerCamelCase__: List[Any] =(
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowerCamelCase__: List[Any] =(
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowerCamelCase__: List[Any] =max(
__a , __a , __a , __a )
if max_product > largest:
lowerCamelCase__: Union[str, Any] =max_product
return largest
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =[]
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as file:
for line in file:
grid.append(line.strip("\n" ).split(" " ) )
    lowerCamelCase__: str =[[int(i ) for i in grid[j]] for j in range(len(grid ) )]
return largest_product(__a )
if __name__ == "__main__":
print(solution())
| 10 |
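# A tiny cross-check of the scan above on a hypothetical 4x4 grid: brute-force all
# length-4 runs in the four directions and confirm the maximum is the main diagonal,
# 2 * 3 * 4 * 5 = 120. This re-derives the answer independently of the function's
# per-column bookkeeping.
tiny = [[2, 1, 1, 1], [1, 3, 1, 1], [1, 1, 4, 1], [1, 1, 1, 5]]
best = 0
for r in range(4):
    for c in range(4):
        for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):   # right, down, both diagonals
            if 0 <= r + 3 * dr < 4 and 0 <= c + 3 * dc < 4:
                p = 1
                for step in range(4):
                    p *= tiny[r + step * dr][c + step * dc]
                best = max(best, p)
assert best == 120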
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase_):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
lowerCamelCase__: List[Any] =copy.deepcopy(self)
lowerCamelCase__: Optional[int] =self.label_schema.copy()
lowerCamelCase__: int =features[self.label_column]
lowerCamelCase__: int =label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 10 | 1 |
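# Hedged usage sketch for the task template above. In the `datasets` releases that
# shipped task templates (pre-3.0), this class is `ImageClassification` in
# `datasets.tasks`, and the method implemented above is `align_with_features`: it
# deep-copies the template and swaps the generic ClassLabel for the dataset's own.
# Column and class names below are illustrative.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification(image_column="image", label_column="labels")
aligned = template.align_with_features(features)
assert aligned.label_schema["labels"].names == ["cat", "dog"]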
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "masked_bert"
def __init__(self : Dict , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=1E-1_2 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str="topK" , UpperCAmelCase_ : List[str]="constant" , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : int , ) ->List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =vocab_size
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: List[Any] =hidden_act
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: Dict =hidden_dropout_prob
lowerCamelCase__: str =attention_probs_dropout_prob
lowerCamelCase__: int =max_position_embeddings
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: str =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: str =pruning_method
lowerCamelCase__: Union[str, Any] =mask_init
lowerCamelCase__: Optional[Any] =mask_scale
| 10 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Optional[Any]=30 , UpperCAmelCase_ : Optional[int]=400 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Tuple=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=1 / 255 , UpperCAmelCase_ : Any=True , ) ->str:
'''simple docstring'''
lowerCamelCase__: int =size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
lowerCamelCase__: Tuple =parent
lowerCamelCase__: Union[str, Any] =batch_size
lowerCamelCase__: Tuple =num_channels
lowerCamelCase__: int =min_resolution
lowerCamelCase__: int =max_resolution
lowerCamelCase__: Dict =do_resize
lowerCamelCase__: List[Any] =size
lowerCamelCase__: Any =do_normalize
lowerCamelCase__: Optional[Any] =image_mean
lowerCamelCase__: int =image_std
lowerCamelCase__: Tuple =do_rescale
lowerCamelCase__: int =rescale_factor
lowerCamelCase__: int =do_pad
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=False) ->str:
'''simple docstring'''
if not batched:
lowerCamelCase__: Any =image_inputs[0]
if isinstance(UpperCAmelCase_ , Image.Image):
lowerCamelCase__ , lowerCamelCase__: List[str] =image.size
else:
lowerCamelCase__ , lowerCamelCase__: List[Any] =image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__: Any =int(self.size["shortest_edge"] * h / w)
lowerCamelCase__: int =self.size["shortest_edge"]
elif w > h:
lowerCamelCase__: int =self.size["shortest_edge"]
lowerCamelCase__: int =int(self.size["shortest_edge"] * w / h)
else:
lowerCamelCase__: Union[str, Any] =self.size["shortest_edge"]
lowerCamelCase__: Any =self.size["shortest_edge"]
else:
lowerCamelCase__: Optional[int] =[]
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
            lowerCamelCase__: Tuple =max(UpperCAmelCase_ , key=lambda item: item[0])[0]
            lowerCamelCase__: Union[str, Any] =max(UpperCAmelCase_ , key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = DetaImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =DetaImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_rescale"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_pad"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__: List[str] =self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
lowerCamelCase__: Dict =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: str =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: str =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__: Dict =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
lowerCamelCase__ , lowerCamelCase__: int =self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
lowerCamelCase__: str =json.loads(f.read())
lowerCamelCase__: Optional[int] ={"image_id": 39_769, "annotations": target}
# encode them
lowerCamelCase__: List[str] =DetaImageProcessor()
lowerCamelCase__: int =image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="pt")
# verify pixel values
lowerCamelCase__: Dict =torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_)
lowerCamelCase__: Dict =torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4))
# verify area
lowerCamelCase__: int =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_))
# verify boxes
lowerCamelCase__: str =torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_)
lowerCamelCase__: str =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3))
# verify image_id
lowerCamelCase__: Optional[Any] =torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_))
# verify is_crowd
lowerCamelCase__: Dict =torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_))
# verify class_labels
lowerCamelCase__: int =torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_))
# verify orig_size
lowerCamelCase__: Union[str, Any] =torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_))
# verify size
lowerCamelCase__: Optional[int] =torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_))
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
lowerCamelCase__: List[Any] =json.loads(f.read())
lowerCamelCase__: Any ={"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
lowerCamelCase__: Dict =pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
lowerCamelCase__: Union[str, Any] =DetaImageProcessor(format="coco_panoptic")
lowerCamelCase__: Union[str, Any] =image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="pt")
# verify pixel values
lowerCamelCase__: List[Any] =torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4))
# verify area
lowerCamelCase__: Any =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase_))
# verify boxes
lowerCamelCase__: List[str] =torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase_ , atol=1E-3))
# verify image_id
lowerCamelCase__: Dict =torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase_))
# verify is_crowd
lowerCamelCase__: Optional[int] =torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase_))
# verify class_labels
lowerCamelCase__: Union[str, Any] =torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase_))
# verify masks
lowerCamelCase__: int =822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase_)
# verify orig_size
lowerCamelCase__: List[Any] =torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase_))
# verify size
lowerCamelCase__: int =torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase_))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : int) ->Optional[int]:
'''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
def __len__(self : str) ->int:
'''simple docstring'''
return self.size
    def is_empty(self) -> bool:
'''simple docstring'''
return self.size == 0
    def first(self):
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
self.size += 1
return self
    def dequeue(self):
        '''simple docstring'''
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
self.size -= 1
return temp
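# A short usage sketch added for illustration (the driver below is not part of
# the original snippet): fill the fixed-size buffer, then drain it in FIFO order.
if __name__ == "__main__":
    queue = _SCREAMING_SNAKE_CASE(3)
    queue.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
    assert len(queue) == 2 and queue.first() == "a"
    assert queue.dequeue() == "a" and queue.dequeue() == "b"
    assert queue.is_empty()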
def lowerCAmelCase_ ( __a , __b ) -> bool:
    """simple docstring"""
    return (__a ^ __b) < 0
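# Quick illustration (added; not part of the original snippet): the XOR of two
# ints is negative exactly when their sign bits differ, detecting opposite signs.
assert lowerCAmelCase_(3, -7) is True
assert lowerCAmelCase_(-5, -4) is False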
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
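# Worked example (added for illustration): under the rules above, the timm key
# "blocks.0.norm1.weight" becomes "encoder.layer.0.layernorm_before.weight",
# and "det_token" becomes "embeddings.detection_tokens".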
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
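# A minimal sketch added for illustration (the helper name is hypothetical) of
# the split performed above: a fused (3 * hidden, hidden) qkv weight is cut
# into three equal (hidden, hidden) slices, in query/key/value order.
def _split_qkv(qkv_weight, hidden_size):
    query = qkv_weight[:hidden_size, :]
    key = qkv_weight[hidden_size : hidden_size * 2, :]
    value = qkv_weight[-hidden_size:, :]
    return query, key, value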
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
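# Example invocation (added; the script name and paths are placeholders):
# python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#     --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small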
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ["gpt2"]
__A = "gpt2"
if is_tf_available():
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
        def __init__(self , tokenizer) -> None:
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text"),))
        def serving(self , text):
            '''simple docstring'''
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
super().setUp()
lowerCamelCase__: Tuple =[GPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCamelCase__: Union[str, Any] =[TFGPTaTokenizer.from_pretrained(UpperCAmelCase_) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
lowerCamelCase__: Union[str, Any] =[
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
lowerCamelCase__: Optional[Any] =list(zip(self.test_sentences , self.test_sentences[::-1]))
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in self.test_sentences:
lowerCamelCase__: Any =tokenizer([test_inputs] , return_tensors="tf")
lowerCamelCase__: List[Any] =tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCamelCase__: Tuple =python_outputs[key].numpy()
lowerCamelCase__: Dict =tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
self.assertTrue(tf.reduce_all(tf.cast(UpperCAmelCase_ , tf.intaa) == tf_outputs_values))
@slow
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: Union[str, Any] =tf.function(UpperCAmelCase_)
for test_inputs in self.test_sentences:
lowerCamelCase__: List[str] =tf.constant(UpperCAmelCase_)
lowerCamelCase__: Any =compiled_tokenizer(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tf_tokenizer(UpperCAmelCase_)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: Any =ModelToSave(tokenizer=UpperCAmelCase_)
lowerCamelCase__: List[Any] =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: Union[str, Any] =model.serving(UpperCAmelCase_) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase__: Optional[int] =Path(UpperCAmelCase_) / "saved.model"
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": model.serving})
lowerCamelCase__: str =tf.saved_model.load(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =loaded_model.signatures["serving_default"](UpperCAmelCase_)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase__: int =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: Optional[int] =tf_tokenizer(UpperCAmelCase_) # Build model with some sample inputs
lowerCamelCase__: Union[str, Any] =tf_tokenizer.get_config()
lowerCamelCase__: str =TFGPTaTokenizer.from_config(UpperCAmelCase_)
lowerCamelCase__: Any =model_from_config(UpperCAmelCase_)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCamelCase__: List[Any] =123_123
for max_length in [3, 5, 1_024]:
lowerCamelCase__: Tuple =tf.convert_to_tensor([self.test_sentences[0]])
lowerCamelCase__: List[str] =tf_tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_)
lowerCamelCase__: List[Any] =out["input_ids"].numpy().shape[1]
assert out_length == max_length
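# A minimal end-to-end sketch added for illustration (mirroring the tests
# above; the helper name is hypothetical): with the tokenizer in the graph, a
# SavedModel can map raw strings straight to logits.
def _text_to_logits(text: str):
    tokenizer = TFGPTaTokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
    model = ModelToSave(tokenizer=tokenizer)
    return model.serving(tf.constant([text]))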
from math import ceil, sqrt
def lowerCAmelCase_ ( limit = 1000000 ) -> int:
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
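# Worked example (added for illustration): the smallest lamina has outer width 3
# around a 1x1 hole and uses 3**2 - 1**2 = 8 tiles; the loop above counts, for
# each outer width, every same-parity hole width whose tile cost stays <= limit.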
if __name__ == "__main__":
print(f'{solution() = }')
def lowerCAmelCase_ ( ) -> int:
    """simple docstring"""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
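# Sanity note (added): the concatenation starts "123456789101112...", so
# constant[0] is "1" and constant[11] (the 12th digit) is "1"; the return value
# multiplies the digits at positions 1, 10, 100, ..., 1_000_000.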
if __name__ == "__main__":
print(solution())
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
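# A minimal round-trip sketch added for illustration (the helper and file name
# are hypothetical): write a small Dataset to Parquet, then read it back.
def _parquet_round_trip(tmp_path):
    dataset = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ParquetDatasetWriter(dataset, tmp_path / "roundtrip.parquet").write()
    return ParquetDatasetReader(str(tmp_path / "roundtrip.parquet")).read()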
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__A = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "maskformer"
lowercase_ = {"hidden_size": "mask_feature_size"}
lowercase_ = ["resnet", "swin"]
lowercase_ = ["detr"]
def __init__(self : Optional[int] , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : float = 20.0 , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Optional[Any] , ) ->str:
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase__: Any =SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: List[str] =backbone_config.pop("model_type")
lowerCamelCase__: int =CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__: List[Any] =config_class.from_dict(UpperCAmelCase_)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported)}""")
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase__: Tuple =DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase__: Any =(
decoder_config.pop("model_type") if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported)}""")
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =CONFIG_MAPPING[decoder_type]
lowerCamelCase__: List[Any] =config_class.from_dict(UpperCAmelCase_)
lowerCamelCase__: List[str] =backbone_config
lowerCamelCase__: Dict =decoder_config
# main feature dimension for the model
lowerCamelCase__: Any =fpn_feature_size
lowerCamelCase__: Optional[int] =mask_feature_size
# initializer
lowerCamelCase__: Union[str, Any] =init_std
lowerCamelCase__: str =init_xavier_std
# Hungarian matcher && loss
lowerCamelCase__: Tuple =cross_entropy_weight
lowerCamelCase__: Union[str, Any] =dice_weight
lowerCamelCase__: List[str] =mask_weight
lowerCamelCase__: List[str] =use_auxiliary_loss
lowerCamelCase__: List[str] =no_object_weight
lowerCamelCase__: Dict =output_auxiliary_logits
lowerCamelCase__: Any =self.decoder_config.encoder_attention_heads
lowerCamelCase__: List[Any] =self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : List[str] , UpperCAmelCase_ : PretrainedConfig , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
return cls(
backbone_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict[str, any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =copy.deepcopy(self.__dict__)
lowerCamelCase__: Any =self.backbone_config.to_dict()
lowerCamelCase__: Any =self.decoder_config.to_dict()
lowerCamelCase__: List[str] =self.__class__.model_type
return output
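# A short usage sketch added for illustration; it assumes the constructor's
# keyword names match the attributes its body reads (backbone_config,
# decoder_config): pair an explicit Swin backbone with a DETR decoder.
def _example_config():
    backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    decoder = DetrConfig()
    return _SCREAMING_SNAKE_CASE(backbone_config=backbone, decoder_config=decoder)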
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: str =os.path.abspath(__a )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowerCamelCase__: Any =tf.train.list_variables(__a )
lowerCamelCase__: Dict =[]
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: Optional[int] =[]
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowerCamelCase__: List[str] =full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowerCamelCase__: int =name[1:]
# figure out how many levels deep the name is
lowerCamelCase__: Optional[int] =0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(__a )
# read data
lowerCamelCase__: List[Any] =tf.train.load_variable(__a , __a )
names.append("/".join(__a ) )
arrays.append(__a )
logger.info(F"""Read a total of {len(__a ):,} layers""" )
# Sanity check
if len(set(__a ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__a ) )})""" )
lowerCamelCase__: Optional[Any] =list(set(__a ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(__a , __a ):
lowerCamelCase__: Tuple =full_name.split("/" )
lowerCamelCase__: Union[str, Any] =model
lowerCamelCase__: Dict =[]
for i, m_name in enumerate(__a ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
lowerCamelCase__: Optional[Any] =int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
lowerCamelCase__: Optional[Any] =getattr(__a , "embeddings" )
lowerCamelCase__: Union[str, Any] =getattr(__a , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
lowerCamelCase__: Any =getattr(__a , "encoder" )
lowerCamelCase__: str =getattr(__a , "layer" )
lowerCamelCase__: Optional[Any] =pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
lowerCamelCase__: Any =getattr(__a , "pooler" )
lowerCamelCase__: Tuple =getattr(__a , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
lowerCamelCase__: List[Any] =getattr(__a , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
lowerCamelCase__: List[str] =getattr(__a , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
lowerCamelCase__: str =getattr(__a , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
lowerCamelCase__: Dict =getattr(__a , "token_type_embeddings" )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append("weight" )
lowerCamelCase__: Optional[int] =getattr(__a , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
lowerCamelCase__: Any =getattr(__a , "attention" )
lowerCamelCase__: Tuple =getattr(__a , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
lowerCamelCase__: Optional[int] =getattr(__a , "attention" )
lowerCamelCase__: Any =getattr(__a , "output" )
lowerCamelCase__: Union[str, Any] =getattr(__a , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
lowerCamelCase__: List[str] =getattr(__a , "attention" )
lowerCamelCase__: Dict =getattr(__a , "output" )
lowerCamelCase__: str =getattr(__a , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
lowerCamelCase__: Optional[Any] =getattr(__a , "output" )
lowerCamelCase__: Tuple =getattr(__a , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
lowerCamelCase__: List[Any] =getattr(__a , "output" )
lowerCamelCase__: Optional[Any] =getattr(__a , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
lowerCamelCase__: List[Any] =getattr(__a , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
lowerCamelCase__: Tuple =getattr(__a , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
lowerCamelCase__: Any =getattr(__a , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
lowerCamelCase__: Union[str, Any] =getattr(__a , "intermediate" )
lowerCamelCase__: List[str] =getattr(__a , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
lowerCamelCase__: List[Any] =getattr(__a , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
lowerCamelCase__: Any =getattr(__a , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
lowerCamelCase__: Any =getattr(__a , "weight" )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowerCamelCase__: Dict =".".join(__a )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , __a ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , __a ):
lowerCamelCase__: str =array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowerCamelCase__: Tuple =array.transpose()
if pointer.shape == array.shape:
lowerCamelCase__: Optional[int] =torch.from_numpy(__a )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
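# Worked example (added for illustration): a TF2 variable named
# "model/layer_with_weights-4/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
# walks the branches above to the PyTorch parameter
# encoder.layer.0.attention.self.query.weight ("layer_with_weights-4" is the
# first encoder block, and a "kernel" becomes a transposed "weight").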
def lowerCAmelCase_ ( __a , __a , __a ) -> Tuple:
"""simple docstring"""
logger.info(F"""Loading model based on config from {config_path}...""" )
lowerCamelCase__: str =BertConfig.from_json_file(__a )
lowerCamelCase__: Union[str, Any] =BertModel(__a )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(__a , __a , __a )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
__A = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
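# Example invocation (added; the script name and paths are placeholders):
# python convert_bert_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin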
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _SCREAMING_SNAKE_CASE ( ChunkPipeline ):
'''simple docstring'''
def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(UpperCAmelCase_)
def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
if "text_queries" in kwargs:
lowerCamelCase__: Any =kwargs.pop("text_queries")
if isinstance(UpperCAmelCase_ , (str, Image.Image)):
lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels}
else:
lowerCamelCase__: Any =image
lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
return results
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] ={}
if "threshold" in kwargs:
lowerCamelCase__: List[Any] =kwargs["threshold"]
if "top_k" in kwargs:
lowerCamelCase__: Any =kwargs["top_k"]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(inputs["image"])
lowerCamelCase__: Dict =inputs["candidate_labels"]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Any =candidate_labels.split(",")
lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(UpperCAmelCase_):
lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework)
lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework)
yield {
"is_last": i == len(UpperCAmelCase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =model_inputs.pop("target_size")
lowerCamelCase__: Dict =model_inputs.pop("candidate_label")
lowerCamelCase__: Dict =model_inputs.pop("is_last")
lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_)
lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =[]
for model_output in model_outputs:
lowerCamelCase__: Optional[Any] =model_output["candidate_label"]
lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_)
lowerCamelCase__: Dict =self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
lowerCamelCase__: Dict =outputs["scores"][index].item()
lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0])
lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box}
results.append(UpperCAmelCase_)
lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: x["score"] , reverse=UpperCAmelCase_)
if top_k:
lowerCamelCase__: Dict =results[:top_k]
return results
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist()
lowerCamelCase__: Optional[int] ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
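# A minimal usage sketch (added for illustration; the checkpoint name is an
# assumption, not fixed by this file):
#
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]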
from __future__ import annotations
from collections.abc import Iterator
class Node:
'''simple docstring'''
    def __init__(self , value : int) -> None:
        '''simple docstring'''
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(self , tree : Node) -> None:
        '''simple docstring'''
        self.tree = tree
    def depth_first_search(self , node : Node | None) -> int:
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__(self : int) ->Iterator[int]:
'''simple docstring'''
yield self.depth_first_search(self.tree)
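# A short usage sketch added for illustration (the helper is not part of the
# original snippet): the iterator yields the sum of every node value.
def _demo_tree_sum() -> int:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    return next(iter(_SCREAMING_SNAKE_CASE(root)))  # 10 + 5 + (-3) == 12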
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
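# A minimal sketch added for illustration of the custom-timesteps API the tests
# above exercise: strictly descending timesteps are accepted as-is.
def _demo_custom_timesteps():
    scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    return scheduler.timesteps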
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : List[Any] , UpperCAmelCase_ : Any=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : Any=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[str]=chr(UpperCAmelCase_) , UpperCAmelCase_ : Dict=chr(UpperCAmelCase_) , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=2_048 , **UpperCAmelCase_ : Tuple , ) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
lowerCamelCase__: Tuple =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
lowerCamelCase__: Tuple =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
lowerCamelCase__: List[Any] =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
lowerCamelCase__: Any =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__: Dict =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , model_max_length=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Creates a mapping for looking up the IDs of special symbols.
lowerCamelCase__: Dict[str, int] ={}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowerCamelCase__: Any =codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowerCamelCase__: Dict[int, str] ={
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowerCamelCase__: Dict =UNICODE_VOCAB_SIZE
lowerCamelCase__: Optional[Any] =len(self._special_codepoints)
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
return self._unicode_vocab_size
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
return list(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str) ->int:
'''simple docstring'''
try:
return ord(UpperCAmelCase_)
except TypeError:
raise ValueError(F"""invalid token: '{token}'""")
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : int) ->str:
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCAmelCase_)
except TypeError:
raise ValueError(F"""invalid id: {index}""")
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
return "".join(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =[self.sep_token_id]
lowerCamelCase__: List[Any] =[self.cls_token_id]
lowerCamelCase__: List[str] =cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Any =[1] + ([0] * len(UpperCAmelCase_)) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCAmelCase_)) + [1]
return result
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: str =[self.sep_token_id]
lowerCamelCase__: int =[self.cls_token_id]
lowerCamelCase__: List[str] =len(cls + token_ids_a + sep) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep) * [1]
return result
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->str:
'''simple docstring'''
return ()
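# Illustrative sketch (not part of the class above): the three helpers work in
# lockstep to produce the [CLS] A [SEP] B [SEP] layout, its special-tokens
# mask, and its token type ids. The ids below are stand-ins chosen for the
# demo; the real tokenizer uses the private-use codepoints defined above.
def _sketch_pair_layout(ids_a: list, ids_b: list) -> tuple:
    cls_id, sep_id = 0xE000, 0xE001  # assumed CANINE-style placeholder values
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, special_mask, token_type_ids
assert all(len(seq) == 7 for seq in _sketch_pair_layout([1, 2], [3, 4]))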
| 10 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: int =9, 14 # noqa: F841
lowerCamelCase__: List[Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase__: List[str] =defaultdict(__a )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
lowerCamelCase__: List[str] =mst(__a )
lowerCamelCase__: Union[str, Any] =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase__: Optional[int] =tuple(answer[:2] )
lowerCamelCase__: List[Any] =tuple(edge[::-1] )
assert edge in result or reverse in result
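# Minimal sketch of Prim's algorithm over the adjacency structure built above
# (node -> list of [neighbor, cost]); shown for illustration only, it is not
# the `mst` implementation imported from graphs.minimum_spanning_tree_prims.
import heapq
def _sketch_prims(adjacency: dict, start: int = 0) -> list:
    visited = {start}
    heap = [(cost, start, nbr) for nbr, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, src, dst = heapq.heappop(heap)
        if dst in visited:
            continue
        visited.add(dst)
        tree.append((src, dst, cost))
        for nbr, c in adjacency[dst]:
            if nbr not in visited:
                heapq.heappush(heap, (c, dst, nbr))
    return tree  # n - 1 edges for a connected graph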
| 10 | 1 |
def lowerCAmelCase_ ( __a , __a ) -> list:
"""simple docstring"""
lowerCamelCase__: int =word.split()
def justify(__a , __a , __a ) -> str:
lowerCamelCase__: Tuple =max_width - width
lowerCamelCase__: str =len(__a )
if len(__a ) == 1:
            # if there is only one word on the line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCamelCase__: List[Any] =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCamelCase__: str =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCamelCase__: str =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__a ):
num_spaces_between_words_list[i] += 1
lowerCamelCase__: Union[str, Any] =[]
for i in range(__a ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__a )
lowerCamelCase__: int =[]
lowerCamelCase__: list[str] =[]
lowerCamelCase__: Any =0
for word in words:
if width + len(__a ) + len(__a ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__a )
width += len(__a )
else:
# justify the line and add it to result
answer.append(justify(__a , __a , __a ) )
# reset new line and new width
lowerCamelCase__ , lowerCamelCase__: int =[word], len(__a )
lowerCamelCase__: str =max_width - width - len(__a )
answer.append(" ".join(__a ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
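# Standalone sketch of the round-robin step used above: `extra` spaces shared
# among `gaps` word gaps, with the leftmost gaps receiving one more. The
# helper name is ours, not from the original module.
def _sketch_distribute_spaces(extra: int, gaps: int) -> list:
    base, rem = divmod(extra, gaps)
    return [base + 1 if i < rem else base for i in range(gaps)]
assert _sketch_distribute_spaces(7, 3) == [3, 2, 2]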
| 10 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 10 | 1 |
from PIL import Image
def lowerCAmelCase_ ( __a ) -> Image:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: Tuple =image.size
lowerCamelCase__: Optional[Any] =0
lowerCamelCase__: List[str] =image.load()
for i in range(__a ):
for j in range(__a ):
lowerCamelCase__: Optional[Any] =pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__a ):
for i in range(__a ):
lowerCamelCase__: int =255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
__A = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
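# Vectorized sketch of the same binarization with NumPy; equivalent to the
# pixel loop above for single-channel ("L") images, up to integer rounding of
# the mean.
import numpy as np
def _sketch_mean_threshold(img: Image) -> Image:
    arr = np.asarray(img, dtype=np.int64)
    return Image.fromarray(((arr > arr.mean()) * 255).astype(np.uint8))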
| 10 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=True ) -> int:
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__A = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
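# Toy sketch of the wildcard renaming performed in recursively_load_weights:
# the layer index is recovered from the fairseq key and substituted for the
# "*" in the MAPPING template.
def _sketch_rename(fairseq_key: str, match: str, template: str) -> str:
    layer_index = fairseq_key.split(match)[0].split(".")[-2]
    return template.replace("*", layer_index)
assert _sketch_rename(
    "encoder.layers.3.self_attn.k_proj.weight",
    "self_attn.k_proj",
    "encoder.layers.*.attention.k_proj",
) == "encoder.layers.3.attention.k_proj"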
| 10 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
return EnvironmentCommand()
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict =parser.add_parser("env")
download_parser.set_defaults(func=UpperCAmelCase_)
download_parser.add_argument(
"--accelerate-config_file" , default=UpperCAmelCase_ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=UpperCAmelCase_)
def __init__(self : Union[str, Any] , UpperCAmelCase_ : str , *UpperCAmelCase_ : Union[str, Any]) ->None:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =accelerate_config_file
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="not installed"
if is_safetensors_available():
import safetensors
lowerCamelCase__: Optional[Any] =safetensors.__version__
elif importlib.util.find_spec("safetensors") is not None:
import safetensors
lowerCamelCase__: Optional[Any] =F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
lowerCamelCase__: Optional[Any] ="not installed"
lowerCamelCase__: Any ="not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCamelCase__: Optional[int] =accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCAmelCase_):
lowerCamelCase__: List[Any] =load_config_from_file(self._accelerate_config_file).to_dict()
lowerCamelCase__: Optional[Any] =(
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
if isinstance(UpperCAmelCase_ , UpperCAmelCase_)
else F"""\t{accelerate_config}"""
)
lowerCamelCase__: Optional[Any] ="not installed"
lowerCamelCase__: str ="NA"
if is_torch_available():
import torch
lowerCamelCase__: str =torch.__version__
lowerCamelCase__: List[Any] =torch.cuda.is_available()
lowerCamelCase__: List[str] ="not installed"
lowerCamelCase__: Tuple ="NA"
if is_tf_available():
import tensorflow as tf
lowerCamelCase__: Tuple =tf.__version__
try:
# deprecated in v2.1
lowerCamelCase__: List[Any] =tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCamelCase__: Union[str, Any] =bool(tf.config.list_physical_devices("GPU"))
lowerCamelCase__: int ="not installed"
lowerCamelCase__: Optional[int] ="not installed"
lowerCamelCase__: List[Any] ="not installed"
lowerCamelCase__: Any ="NA"
if is_flax_available():
import flax
import jax
import jaxlib
lowerCamelCase__: Optional[int] =flax.__version__
lowerCamelCase__: Tuple =jax.__version__
lowerCamelCase__: List[Any] =jaxlib.__version__
lowerCamelCase__: Dict =jax.lib.xla_bridge.get_backend().platform
lowerCamelCase__: Union[str, Any] ={
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"""{safetensors_version}""",
"Accelerate version": F"""{accelerate_version}""",
"Accelerate config": F"""{accelerate_config_str}""",
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": F"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""",
"Jax version": F"""{jax_version}""",
"JaxLib version": F"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(UpperCAmelCase_))
return info
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : str) ->Union[str, Any]:
'''simple docstring'''
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 10 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
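# Worked example of the same dynamic program on the classic two-state weather
# HMM; restated compactly here because the top-level name above was mangled in
# this dump.
def _sketch_viterbi(obs, states, start_p, trans_p, emit_p):
    prob = {s: start_p[s] * emit_p[s][obs[0]] for s in states}
    path = {s: [s] for s in states}
    for o in obs[1:]:
        new_prob, new_path = {}, {}
        for s in states:
            prev = max(states, key=lambda k: prob[k] * trans_p[k][s])
            new_prob[s] = prob[prev] * trans_p[prev][s] * emit_p[s][o]
            new_path[s] = path[prev] + [s]
        prob, path = new_prob, new_path
    return path[max(states, key=lambda k: prob[k])]
assert _sketch_viterbi(
    ["normal", "cold", "dizzy"],
    ["Healthy", "Fever"],
    {"Healthy": 0.6, "Fever": 0.4},
    {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}},
    {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    },
) == ["Healthy", "Healthy", "Fever"]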
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
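# Sketch of the lazy-import pattern that _LazyModule implements, written with
# a PEP 562 module-level __getattr__; illustration only, since the real class
# also mirrors the TYPE_CHECKING branch and handles submodule access.
import importlib as _importlib
_LAZY_SKETCH = {"IBertConfig": ".configuration_ibert"}
def __getattr__(name):
    if name in _LAZY_SKETCH:
        return getattr(_importlib.import_module(_LAZY_SKETCH[name], __package__), name)
    raise AttributeError(F"""module {__name__!r} has no attribute {name!r}""")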
| 10 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
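# Sketch of what the default conv stack above does to sequence length: each
# 1-D conv maps L -> (L - kernel) // stride + 1, so one second of 16 kHz audio
# (16000 samples) comes out as 49 frames.
def _sketch_conv_output_length(length: int, kernels, strides) -> int:
    for kernel, stride in zip(kernels, strides):
        length = (length - kernel) // stride + 1
    return length
assert _sketch_conv_output_length(16_000, (10, 3, 3, 3, 3, 2, 2), (5, 2, 2, 2, 2, 2, 2)) == 49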
| 10 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: int =len(__a ) // 2
# choose the middle 3 elements
lowerCamelCase__: int =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
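# Cross-check sketch: on any unimodal list (the precondition of the O(log n)
# routine above) a linear scan finds the same peak.
def _sketch_peak_linear(lst: list) -> int:
    for i in range(len(lst) - 1):
        if lst[i] > lst[i + 1]:
            return lst[i]
    return lst[-1]
assert _sketch_peak_linear([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5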
| 10 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCAmelCase_ ( __a , __a , __a = 10**-10 ) -> float:
"""simple docstring"""
lowerCamelCase__: str =a
while True:
lowerCamelCase__: Optional[Any] =Decimal(__a ) - (
Decimal(eval(__a ) ) / Decimal(eval(str(diff(__a ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__a ) ) < precision: # noqa: S307
return float(__a )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
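# Variant sketch without eval()/sympy: the same Newton iteration for a plain
# callable, with a symmetric finite difference standing in for diff().
def _sketch_newton(f, x: float, precision: float = 1e-10, h: float = 1e-6) -> float:
    while abs(f(x)) >= precision:
        x -= f(x) / ((f(x + h) - f(x - h)) / (2 * h))
    return x
assert abs(_sketch_newton(lambda x: x**2 - 5 * x + 2, 0.4) - 0.4384471872) < 1e-6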
| 10 | 1 |
from functools import lru_cache
@lru_cache
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
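# Iterative sketch of the same function: no recursion depth limit and no cache
# needed for one-off calls.
import math
def _sketch_factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for n in range(2, num + 1):
        result *= n
    return result
assert _sketch_factorial(10) == math.factorial(10) == 3_628_800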
| 10 |
import itertools
import math
def lowerCAmelCase_ ( __a ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
lowerCamelCase__: Optional[int] =2
while True:
if is_prime(__a ):
yield num
num += 1
def lowerCAmelCase_ ( __a = 10001 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , __a ) )
if __name__ == "__main__":
print(f'{solution() = }')
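# Cross-check sketch for the 6k +/- 1 test above: trial division by smaller
# primes reproduces the same sequence.
def _sketch_first_primes(count: int) -> list:
    primes, num = [], 2
    while len(primes) < count:
        if all(num % p for p in primes if p * p <= num):
            primes.append(num)
        num += 1
    return primes
assert _sketch_first_primes(10) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]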
| 10 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableDiffusionDiffEditPipeline
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
lowercase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ = frozenset([] )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0)
lowerCamelCase__: Union[str, Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
lowerCamelCase__: Optional[Any] =DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_zero=UpperCAmelCase_ , )
torch.manual_seed(0)
lowerCamelCase__: Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
lowerCamelCase__: Dict =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__: Dict =CLIPTextModel(UpperCAmelCase_)
lowerCamelCase__: List[Any] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: str ={
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=0) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: Tuple =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: List[Any] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Optional[int] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=0) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCamelCase__: Optional[int] =Image.fromarray(np.uinta(UpperCAmelCase_)).convert("RGB")
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Union[str, Any] =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: Union[str, Any] =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: List[str] ={
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]=0) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_)
lowerCamelCase__: str =image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCamelCase__: Any =Image.fromarray(np.uinta(UpperCAmelCase_)).convert("RGB")
if str(UpperCAmelCase_).startswith("mps"):
lowerCamelCase__: Dict =torch.manual_seed(UpperCAmelCase_)
else:
lowerCamelCase__: int =torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
lowerCamelCase__: Dict ={
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components"):
return
lowerCamelCase__: Dict =self.get_dummy_components()
lowerCamelCase__: Optional[Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
lowerCamelCase__: Union[str, Any] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: int =self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCamelCase__: Optional[int] =self.get_dummy_inputs(UpperCAmelCase_)
lowerCamelCase__: int =pipe_loaded(**UpperCAmelCase_)[0]
lowerCamelCase__: int =np.abs(output - output_loaded).max()
self.assertLess(UpperCAmelCase_ , 1E-4)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="cpu"
lowerCamelCase__: List[Any] =self.get_dummy_components()
lowerCamelCase__: int =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.get_dummy_mask_inputs(UpperCAmelCase_)
lowerCamelCase__: Dict =pipe.generate_mask(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
lowerCamelCase__: int =np.array([0] * 9)
lowerCamelCase__: Tuple =np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str ="cpu"
lowerCamelCase__: Optional[Any] =self.get_dummy_components()
lowerCamelCase__: Optional[Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Tuple =self.get_dummy_inversion_inputs(UpperCAmelCase_)
lowerCamelCase__: List[Any] =pipe.invert(**UpperCAmelCase_).images
lowerCamelCase__: Union[str, Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
lowerCamelCase__: int =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase__: Optional[int] =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ="cpu"
lowerCamelCase__: Any =self.get_dummy_components()
lowerCamelCase__: List[Any] ={"beta_start": 0.0_0085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
lowerCamelCase__: List[Any] =DPMSolverMultistepScheduler(**UpperCAmelCase_)
lowerCamelCase__: Any =DPMSolverMultistepInverseScheduler(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Dict =self.get_dummy_inversion_inputs(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =pipe.invert(**UpperCAmelCase_).images
lowerCamelCase__: List[Any] =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
lowerCamelCase__: Optional[int] =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase__: int =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCAmelCase_ , 1E-3)
@require_torch_gpu
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
lowerCamelCase__: Optional[int] =raw_image.convert("RGB").resize((768, 768))
lowerCamelCase__: Any =raw_image
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =torch.manual_seed(0)
lowerCamelCase__: List[str] =StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa)
lowerCamelCase__: int =DDIMScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: int =DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] ="a bowl of fruit"
lowerCamelCase__: List[str] ="a bowl of pears"
lowerCamelCase__: Tuple =pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase_ , target_prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , )
lowerCamelCase__: Optional[Any] =pipe.invert(
prompt=UpperCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase_).latents
lowerCamelCase__: Optional[int] =pipe(
prompt=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_latents=UpperCAmelCase_ , generator=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase__: int =(
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
lowerCamelCase__: str =torch.manual_seed(0)
lowerCamelCase__: Optional[Any] =StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=UpperCAmelCase_ , torch_dtype=torch.floataa)
lowerCamelCase__: Dict =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__: List[str] =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Dict ="a bowl of fruit"
lowerCamelCase__: Optional[Any] ="a bowl of pears"
lowerCamelCase__: int =pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase_ , target_prompt=UpperCAmelCase_ , generator=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =pipe.invert(
prompt=UpperCAmelCase_ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase_ , num_inference_steps=25 , ).latents
lowerCamelCase__: List[Any] =pipe(
prompt=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_latents=UpperCAmelCase_ , generator=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase__: List[Any] =(
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png").resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 10 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=30 , UpperCAmelCase_ : List[str]=400 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=0.9 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =size if size is not None else {"shortest_edge": 30}
lowerCamelCase__: Dict =crop_size if crop_size is not None else {"height": 30, "width": 30}
lowerCamelCase__: Any =parent
lowerCamelCase__: Any =batch_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =min_resolution
lowerCamelCase__: Union[str, Any] =max_resolution
lowerCamelCase__: Union[str, Any] =do_resize_and_center_crop
lowerCamelCase__: Optional[int] =size
lowerCamelCase__: str =crop_pct
lowerCamelCase__: Any =crop_size
lowerCamelCase__: List[str] =do_normalize
lowerCamelCase__: List[str] =image_mean
lowerCamelCase__: Tuple =image_std
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase_ , "do_resize_and_center_crop"))
self.assertTrue(hasattr(UpperCAmelCase_ , "size"))
self.assertTrue(hasattr(UpperCAmelCase_ , "crop_pct"))
self.assertTrue(hasattr(UpperCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCAmelCase_ , "image_std"))
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[str]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 30})
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30})
lowerCamelCase__: Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image)
# Test not batched input
lowerCamelCase__: Dict =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: int =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__: Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
# Test not batched input
lowerCamelCase__: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: List[str] =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
# Test not batched input
lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__: str =image_processing(UpperCAmelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 10 | 1 |
def lowerCAmelCase_ ( __a , __a ) -> tuple[float, float]:
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str =equationa
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =equationa
# Calculate the determinants of the matrices
lowerCamelCase__: Optional[int] =aa * ba - aa * ba
lowerCamelCase__: List[Any] =ca * ba - ca * ba
lowerCamelCase__: List[str] =aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
lowerCamelCase__: str =determinant_x / determinant
lowerCamelCase__: List[Any] =determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
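# Worked sketch of Cramer's rule for the 2x2 case, with the intended
# determinants written out (the name-collapsed assignments above obscure
# which coefficients pair up):
#   a1*x + b1*y = c1 ; a2*x + b2*y = c2
#   D = a1*b2 - a2*b1 ; Dx = c1*b2 - c2*b1 ; Dy = a1*c2 - a2*c1
def _sketch_cramers(eq1, eq2):
    (a1, b1, c1), (a2, b2, c2) = eq1, eq2
    d = a1 * b2 - a2 * b1
    if d == 0:
        raise ValueError("Singular or dependent system")
    return ((c1 * b2 - c2 * b1) / d, (a1 * c2 - a2 * c1) / d)
assert _sketch_cramers([1, 1, 3], [1, -1, 1]) == (2.0, 1.0)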
| 10 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 512,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = RetriBertTokenizer
lowercase_ = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Re-create the backend normalizer if its serialized state disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
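# A minimal usage sketch (the model id comes from the pretrained maps above):
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   tokenizer.build_inputs_with_special_tokens([7, 8], [9])  # -> [CLS] 7 8 [SEP] 9 [SEP]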
| 10 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
def __init__(self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : str=[10, 20, 30, 40] , UpperCAmelCase_ : Union[str, Any]=[1, 1, 2, 1] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]="relu" , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Tuple=None , ) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =parent
lowerCamelCase__: List[Any] =batch_size
lowerCamelCase__: int =image_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: List[Any] =embeddings_size
lowerCamelCase__: Any =hidden_sizes
lowerCamelCase__: List[Any] =depths
lowerCamelCase__: List[Any] =is_training
lowerCamelCase__: Tuple =use_labels
lowerCamelCase__: Any =hidden_act
lowerCamelCase__: int =num_labels
lowerCamelCase__: List[str] =scope
lowerCamelCase__: str =len(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Union[str, Any] =None
if self.use_labels:
lowerCamelCase__: Dict =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: Tuple =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict) ->List[str]:
'''simple docstring'''
lowerCamelCase__: int =TFRegNetModel(config=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =model(UpperCAmelCase_ , training=UpperCAmelCase_)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.num_labels
lowerCamelCase__: List[str] =TFRegNetForImageClassification(UpperCAmelCase_)
lowerCamelCase__: List[str] =model(UpperCAmelCase_ , labels=UpperCAmelCase_ , training=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =config_and_inputs
lowerCamelCase__: int ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason="RegNet does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Dict:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also works using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Optional[Any] =TFRegNetModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
lowerCamelCase__: Union[str, Any] =self.default_image_processor
lowerCamelCase__: Union[str, Any] =prepare_img()
lowerCamelCase__: Any =image_processor(images=UpperCAmelCase_ , return_tensors="tf")
# forward pass
lowerCamelCase__: Tuple =model(**UpperCAmelCase_ , training=UpperCAmelCase_)
# verify the logits
lowerCamelCase__: List[str] =tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: int =tf.constant([-0.4180, -1.5051, -3.4836])
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4)
| 10 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
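# A minimal sketch of how the helper above is consumed (mirrors the tester below):
#   config, inputs_dict = FlaxBlenderbotModelTester(self).prepare_config_and_inputs()
#   outputs = FlaxBlenderbotModel(config)(**inputs_dict)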
class FlaxBlenderbotModelTester:
def __init__(self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Any=0.02 , ) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: List[str] =batch_size
lowerCamelCase__: Optional[int] =seq_length
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: str =use_labels
lowerCamelCase__: Optional[Any] =vocab_size
lowerCamelCase__: int =hidden_size
lowerCamelCase__: Dict =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: int =hidden_act
lowerCamelCase__: Tuple =hidden_dropout_prob
lowerCamelCase__: List[str] =attention_probs_dropout_prob
lowerCamelCase__: Optional[int] =max_position_embeddings
lowerCamelCase__: int =eos_token_id
lowerCamelCase__: Union[str, Any] =pad_token_id
lowerCamelCase__: List[str] =bos_token_id
lowerCamelCase__: int =initializer_range
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowerCamelCase__: str =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowerCamelCase__: int =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: Dict =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , )
lowerCamelCase__: Any =prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Dict =self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =20
lowerCamelCase__: Optional[int] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: str =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: List[Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: Union[str, Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: Dict =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =20
lowerCamelCase__: Optional[Any] =model_class_name(UpperCAmelCase_)
lowerCamelCase__: Any =model.encode(inputs_dict["input_ids"])
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
lowerCamelCase__: Optional[int] =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowerCamelCase__: Union[str, Any] =model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase__: List[Any] =model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Dict =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
lowerCamelCase__: str =model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_)
lowerCamelCase__: str =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""")
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase__: Optional[Any] =input_ids.shape[0]
lowerCamelCase__: List[str] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =self._get_config_and_data()
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Dict =lm_model(input_ids=UpperCAmelCase_)
lowerCamelCase__: Dict =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase__: str =FlaxBlenderbotForConditionalGeneration(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowerCamelCase__: List[str] =lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =(*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowerCamelCase__: Optional[int] =shift_tokens_right(UpperCAmelCase_ , 1 , 2)
lowerCamelCase__: List[str] =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
lowerCamelCase__: Tuple =np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: List[str] =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : List[str]):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest("JIT Enabled"):
lowerCamelCase__: Any =encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: Tuple =encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowerCamelCase__: Optional[Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
lowerCamelCase__: int ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest("JIT Enabled"):
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
lowerCamelCase__: int =decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__: Optional[int] =model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase__: int =np.ones((1, 1)) * model.config.eos_token_id
lowerCamelCase__: str =model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ={"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
lowerCamelCase__: Union[str, Any] ={"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
lowerCamelCase__: Dict =FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=UpperCAmelCase_)
lowerCamelCase__: List[str] =BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
lowerCamelCase__: Any =["Sam"]
lowerCamelCase__: Tuple =tokenizer(UpperCAmelCase_ , return_tensors="jax")
lowerCamelCase__: Optional[Any] =model.generate(**UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Any ="Sam is a great name. It means \"sun\" in Gaelic."
lowerCamelCase__: Optional[Any] =tokenizer.batch_decode(UpperCAmelCase_ , **UpperCAmelCase_)
assert generated_txt[0].strip() == tgt_text
| 10 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts the list in place by exchanging any pair found in the wrong order."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
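# Example: exchange_sort([5, 4, 3, 1]) returns [1, 3, 4, 5] after O(n^2) comparisons;
# the sort happens in place, so the argument list is mutated as well.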
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
__A = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 10 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    """Loads a vocabulary file (one token per line) into an ordered token -> id mapping."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
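# A minimal usage sketch (the file name is hypothetical, one token per line):
#   vocab = load_vocab("vocab.txt")
#   vocab["[PAD]"]  # -> 0 if "[PAD]" is the first line of the file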
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
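# A minimal sketch of the fairseq/spm id offset handled above (vocab file hypothetical):
#   tok = XLMProphetNetTokenizer("prophetnet.tokenizer")
#   tok._convert_token_to_id("[PAD]")  # -> 0, served from fairseq_tokens_to_ids
#   tok._convert_token_to_id("▁de")    # -> sp_model id + fairseq_offset (12)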
| 10 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: str =self.dummy_cond_unet_upscale
lowerCamelCase__: int =DDPMScheduler()
lowerCamelCase__: str =DDIMScheduler(prediction_type="v_prediction")
lowerCamelCase__: Any =self.dummy_vae
lowerCamelCase__: List[Any] =self.dummy_text_encoder
lowerCamelCase__: Any =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: Dict =self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCamelCase__: List[str] =Image.fromarray(np.uinta(UpperCAmelCase_)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase__: int =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase_ , low_res_scheduler=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , max_noise_level=350 , )
lowerCamelCase__: List[Any] =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] ="A painting of a squirrel eating a burger"
lowerCamelCase__: Tuple =torch.Generator(device=UpperCAmelCase_).manual_seed(0)
lowerCamelCase__: Tuple =sd_pipe(
[prompt] , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__: int =output.images
lowerCamelCase__: List[str] =torch.Generator(device=UpperCAmelCase_).manual_seed(0)
lowerCamelCase__: List[str] =sd_pipe(
[prompt] , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase__: Optional[int] =image[0, -3:, -3:, -1]
lowerCamelCase__: int =image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase__: List[Any] =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase__: Dict =np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
lowerCamelCase__: Dict ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Any =self.dummy_cond_unet_upscale
lowerCamelCase__: Optional[Any] =DDPMScheduler()
lowerCamelCase__: Optional[int] =DDIMScheduler(prediction_type="v_prediction")
lowerCamelCase__: List[str] =self.dummy_vae
lowerCamelCase__: List[Any] =self.dummy_text_encoder
lowerCamelCase__: str =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: int =self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCamelCase__: Any =Image.fromarray(np.uinta(UpperCAmelCase_)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
lowerCamelCase__: Dict =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase_ , low_res_scheduler=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , max_noise_level=350 , )
lowerCamelCase__: List[Any] =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: List[str] ="A painting of a squirrel eating a burger"
lowerCamelCase__: Optional[Any] =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__: List[str] =output.images
assert image.shape[0] == 2
lowerCamelCase__: Optional[int] =torch.Generator(device=UpperCAmelCase_).manual_seed(0)
lowerCamelCase__: Optional[int] =sd_pipe(
[prompt] , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__: Union[str, Any] =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =self.dummy_cond_unet_upscale
lowerCamelCase__: str =DDPMScheduler()
lowerCamelCase__: Dict =DDIMScheduler(prediction_type="v_prediction")
lowerCamelCase__: Optional[Any] =self.dummy_vae
lowerCamelCase__: Dict =self.dummy_text_encoder
lowerCamelCase__: str =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
lowerCamelCase__: Union[str, Any] =self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowerCamelCase__: List[Any] =Image.fromarray(np.uinta(UpperCAmelCase_)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase__: List[str] =unet.half()
lowerCamelCase__: List[Any] =text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase__: List[Any] =StableDiffusionUpscalePipeline(
unet=UpperCAmelCase_ , low_res_scheduler=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , max_noise_level=350 , )
lowerCamelCase__: Tuple =sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
lowerCamelCase__: List[str] ="A painting of a squirrel eating a burger"
lowerCamelCase__: Optional[Any] =torch.manual_seed(0)
lowerCamelCase__: Union[str, Any] =sd_pipe(
[prompt] , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type="np" , ).images
lowerCamelCase__: int =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
lowerCamelCase__: Union[str, Any] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy")
lowerCamelCase__: List[Any] ="stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase__: Optional[Any] =StableDiffusionUpscalePipeline.from_pretrained(UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
pipe.enable_attention_slicing()
lowerCamelCase__: str ="a cat sitting on a park bench"
lowerCamelCase__: Any =torch.manual_seed(0)
lowerCamelCase__: Union[str, Any] =pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , )
lowerCamelCase__: Optional[int] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
lowerCamelCase__: int =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy")
lowerCamelCase__: Optional[int] ="stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase__: Dict =StableDiffusionUpscalePipeline.from_pretrained(
UpperCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
pipe.enable_attention_slicing()
lowerCamelCase__: Optional[Any] ="a cat sitting on a park bench"
lowerCamelCase__: Optional[Any] =torch.manual_seed(0)
lowerCamelCase__: List[Any] =pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , )
lowerCamelCase__: List[str] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5E-1
def SCREAMING_SNAKE_CASE_ (self : Any) ->List[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__: Optional[Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
lowerCamelCase__: List[str] ="stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase__: int =StableDiffusionUpscalePipeline.from_pretrained(
UpperCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
lowerCamelCase__: Optional[int] ="a cat sitting on a park bench"
lowerCamelCase__: Tuple =torch.manual_seed(0)
lowerCamelCase__: Any =pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , output_type="np" , )
lowerCamelCase__: Dict =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
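# With the lazy structure above, importing the package stays cheap: submodules are
# only materialized on first attribute access (a usage sketch, not part of the module):
#   from transformers.models.ibert import IBertConfig  # resolves without importing torch
#   from transformers.models.ibert import IBertModel   # triggers the torch-backed import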
| 10 | 1 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__A = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
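# Instantiating the shim still works but emits the FutureWarning above (usage sketch):
#   extractor = PerceiverFeatureExtractor()  # behaves exactly like PerceiverImageProcessor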
| 10 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
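# Each entry maps num_image_embeds N to an (h, w) adaptive-pooling grid with h * w == N,
# e.g. 4 -> (2, 2) pools the CNN feature map into a 2x2 grid of 4 image embeddings.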
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # B x 3 x H x W -> B x 2048 x 7 x 7 -> adaptive pool -> B x N x 2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Tuple =[len(row["sentence"] ) for row in batch]
lowerCamelCase__ , lowerCamelCase__: Optional[int] =len(__a ), max(__a )
lowerCamelCase__: List[str] =torch.zeros(__a , __a , dtype=torch.long )
lowerCamelCase__: Optional[int] =torch.zeros(__a , __a , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__a , __a ) ):
lowerCamelCase__: Optional[int] =input_row["sentence"]
lowerCamelCase__: Dict =1
lowerCamelCase__: List[str] =torch.stack([row["image"] for row in batch] )
lowerCamelCase__: Optional[int] =torch.stack([row["label"] for row in batch] )
lowerCamelCase__: List[str] =torch.stack([row["image_start_token"] for row in batch] )
lowerCamelCase__: List[Any] =torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
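# A hedged sketch of the padding logic in the collate function above: variable-
# length token rows are copied into a zero tensor and a parallel mask marks the
# real tokens (all names below are illustrative, not from the source):
#
#     rows = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
#     lens = [len(r) for r in rows]
#     text = torch.zeros(len(rows), max(lens), dtype=torch.long)
#     mask = torch.zeros(len(rows), max(lens), dtype=torch.long)
#     for i, (row, n) in enumerate(zip(rows, lens)):
#         text[i, :n] = row
#         mask[i, :n] = 1
#     # text -> [[5, 6, 7], [8, 9, 0]]; mask -> [[1, 1, 1], [1, 1, 0]]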
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
| 10 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase_):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
lowerCamelCase__: List[Any] =copy.deepcopy(self)
lowerCamelCase__: Optional[int] =self.label_schema.copy()
lowerCamelCase__: int =features[self.label_column]
lowerCamelCase__: int =label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
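# A hedged usage sketch for the task template above (the class name is mangled
# in this dump; in `datasets` it is ImageClassification, and the copy-and-swap
# method is align_with_features). Aligning replaces the placeholder ClassLabel
# in the label schema with the dataset's concrete one:
#
#     from datasets import ClassLabel, Features, Image
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     task.label_schema["labels"].names        # -> ['cat', 'dog']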
| 10 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__(self : str , UpperCAmelCase_ : int = 65_536 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : str = "fourier" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Tuple[int] = (32, 32, 64) , UpperCAmelCase_ : str = None , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = False , ) ->Tuple:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Tuple =sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__: Optional[int] =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_)
lowerCamelCase__: int =2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__: str =Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__: List[Any] =block_out_channels[0] * 4
lowerCamelCase__: Tuple =TimestepEmbedding(
in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , )
lowerCamelCase__: List[str] =nn.ModuleList([])
lowerCamelCase__: List[str] =None
lowerCamelCase__: List[str] =nn.ModuleList([])
lowerCamelCase__: str =None
# down
lowerCamelCase__: Optional[Any] =in_channels
for i, down_block_type in enumerate(UpperCAmelCase_):
lowerCamelCase__: Optional[Any] =output_channel
lowerCamelCase__: List[str] =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__: Optional[Any] =i == len(UpperCAmelCase_) - 1
lowerCamelCase__: int =get_down_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase_)
# mid
lowerCamelCase__: Any =get_mid_block(
UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , )
# up
lowerCamelCase__: Optional[Any] =list(reversed(UpperCAmelCase_))
lowerCamelCase__: Optional[Any] =reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__: Tuple =out_channels
else:
lowerCamelCase__: Optional[int] =block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_):
lowerCamelCase__: int =output_channel
lowerCamelCase__: str =(
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_) - 1 else final_upsample_channels
)
lowerCamelCase__: Union[str, Any] =i == len(UpperCAmelCase_) - 1
lowerCamelCase__: Any =get_up_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =output_channel
# out
lowerCamelCase__: Optional[Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
lowerCamelCase__: Union[str, Any] =get_out_block(
out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : bool = True , ) ->Union[UNet1DOutput, Tuple]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =timestep
if not torch.is_tensor(UpperCAmelCase_):
lowerCamelCase__: Any =torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(UpperCAmelCase_) and len(timesteps.shape) == 0:
lowerCamelCase__: Optional[Any] =timesteps[None].to(sample.device)
lowerCamelCase__: Union[str, Any] =self.time_proj(UpperCAmelCase_)
if self.config.use_timestep_embedding:
lowerCamelCase__: Dict =self.time_mlp(UpperCAmelCase_)
else:
lowerCamelCase__: Optional[Any] =timestep_embed[..., None]
lowerCamelCase__: Union[str, Any] =timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
lowerCamelCase__: List[str] =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
lowerCamelCase__: List[str] =()
for downsample_block in self.down_blocks:
lowerCamelCase__ , lowerCamelCase__: Any =downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase__: int =self.mid_block(UpperCAmelCase_ , UpperCAmelCase_)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
lowerCamelCase__: Union[str, Any] =down_block_res_samples[-1:]
lowerCamelCase__: Optional[int] =down_block_res_samples[:-1]
lowerCamelCase__: List[str] =upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_)
# 5. post-process
if self.out_block:
lowerCamelCase__: List[Any] =self.out_block(UpperCAmelCase_ , UpperCAmelCase_)
if not return_dict:
return (sample,)
return UNet1DOutput(sample=UpperCAmelCase_)
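# A hedged, standalone sketch of the timestep handling at the top of `forward`
# above: a plain Python timestep is promoted to a 1-element tensor, projected,
# then broadcast across the batch and length dimensions (the sine projection
# below is a stand-in for the real Fourier/positional embedding):
#
#     sample = torch.rand(4, 2, 16)                        # (batch, channels, length)
#     timesteps = torch.tensor([10], dtype=torch.long, device=sample.device)
#     embed = torch.sin(timesteps[:, None].float())        # stand-in time projection
#     embed = embed[..., None].repeat(1, 1, sample.shape[2])
#     embed = embed.broadcast_to(sample.shape[:1] + embed.shape[1:])
#     embed.shape                                          # torch.Size([4, 1, 16])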
| 10 |
import logging
from transformers.configuration_utils import PretrainedConfig
__A = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "masked_bert"
def __init__(self : Dict , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : List[Any]=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=1E-1_2 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str="topK" , UpperCAmelCase_ : List[str]="constant" , UpperCAmelCase_ : str=0.0 , **UpperCAmelCase_ : int , ) ->List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =vocab_size
lowerCamelCase__: Dict =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Any =num_attention_heads
lowerCamelCase__: List[Any] =hidden_act
lowerCamelCase__: str =intermediate_size
lowerCamelCase__: Dict =hidden_dropout_prob
lowerCamelCase__: str =attention_probs_dropout_prob
lowerCamelCase__: int =max_position_embeddings
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: str =initializer_range
lowerCamelCase__: List[Any] =layer_norm_eps
lowerCamelCase__: str =pruning_method
lowerCamelCase__: Union[str, Any] =mask_init
lowerCamelCase__: Optional[Any] =mask_scale
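# A hedged usage sketch: the config above mirrors BertConfig plus three
# pruning-specific fields (its name in the original movement-pruning example
# is MaskedBertConfig; class names in this dump are mangled):
#
#     config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#     config.pruning_method        # -> "topK"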
| 10 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "data2vec-vision"
def __init__(self : Optional[Any] , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Optional[int]=3_072 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[str]=1E-1_2 , UpperCAmelCase_ : Optional[Any]=224 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=0.4 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[Any]=255 , **UpperCAmelCase_ : str , ) ->Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =hidden_size
lowerCamelCase__: Optional[int] =num_hidden_layers
lowerCamelCase__: Optional[int] =num_attention_heads
lowerCamelCase__: Dict =intermediate_size
lowerCamelCase__: Optional[Any] =hidden_act
lowerCamelCase__: int =hidden_dropout_prob
lowerCamelCase__: Dict =attention_probs_dropout_prob
lowerCamelCase__: Optional[int] =initializer_range
lowerCamelCase__: Union[str, Any] =layer_norm_eps
lowerCamelCase__: Optional[int] =image_size
lowerCamelCase__: Optional[int] =patch_size
lowerCamelCase__: str =num_channels
lowerCamelCase__: Union[str, Any] =use_mask_token
lowerCamelCase__: Optional[Any] =use_absolute_position_embeddings
lowerCamelCase__: Optional[int] =use_relative_position_bias
lowerCamelCase__: int =use_shared_relative_position_bias
lowerCamelCase__: Optional[int] =layer_scale_init_value
lowerCamelCase__: List[Any] =drop_path_rate
lowerCamelCase__: int =use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCamelCase__: Optional[Any] =out_indices
lowerCamelCase__: Tuple =pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCamelCase__: str =use_auxiliary_head
lowerCamelCase__: Optional[int] =auxiliary_loss_weight
lowerCamelCase__: Optional[Any] =auxiliary_channels
lowerCamelCase__: Union[str, Any] =auxiliary_num_convs
lowerCamelCase__: List[str] =auxiliary_concat_input
lowerCamelCase__: int =semantic_loss_ignore_index
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE_ (self : int) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def SCREAMING_SNAKE_CASE_ (self : Any) ->float:
'''simple docstring'''
return 1E-4
| 10 |
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : int) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Any =n
lowerCamelCase__: Tuple =[None] * self.n
lowerCamelCase__: str =0 # index of the first element
lowerCamelCase__: Tuple =0
lowerCamelCase__: Optional[Any] =0
def __len__(self : str) ->int:
'''simple docstring'''
return self.size
def SCREAMING_SNAKE_CASE_ (self : int) ->bool:
'''simple docstring'''
return self.size == 0
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str:
'''simple docstring'''
return False if self.is_empty() else self.array[self.front]
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[int]) ->str:
'''simple docstring'''
if self.size >= self.n:
raise Exception("QUEUE IS FULL")
lowerCamelCase__: List[Any] =data
lowerCamelCase__: Dict =(self.rear + 1) % self.n
self.size += 1
return self
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple:
'''simple docstring'''
if self.size == 0:
raise Exception("UNDERFLOW")
lowerCamelCase__: Optional[Any] =self.array[self.front]
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: Dict =(self.front + 1) % self.n
self.size -= 1
return temp
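# A hedged usage sketch for the fixed-size circular queue above (class and
# method names are mangled in this dump; enqueue/dequeue/first are the usual
# names). Front and rear indices wrap modulo n, so freed slots are reused:
#
#     q = CircularQueue(3)        # hypothetical restored class name
#     q.enqueue(1).enqueue(2).enqueue(3)   # enqueue returns self, so calls chain
#     q.dequeue()                 # -> 1, frees a slot at the front
#     q.enqueue(4)                # rear wraps: (rear + 1) % n reuses index 0
#     q.first()                   # -> 2
#     len(q)                      # -> 3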
| 10 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__A = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__A = 12_8022
__A = 12_8028
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = MaMaaaTokenizer
lowercase_ = False
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: List[Any] =["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowerCamelCase__: Optional[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: int =Path(self.tmpdirname)
save_json(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["vocab_file"])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["spm_file"])
lowerCamelCase__: List[Any] =MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Any) ->str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any) ->Optional[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
lowerCamelCase__: Dict ="</s>"
lowerCamelCase__: int =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.get_tokenizer()
lowerCamelCase__: Dict =list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , "</s>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "<s>")
self.assertEqual(len(UpperCAmelCase_) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip("Skip this test while all models are still to be uploaded.")
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.get_tokenizer()
lowerCamelCase__: Any =tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [2, 3, 4, 5, 6] , )
lowerCamelCase__: List[Any] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(UpperCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
lowerCamelCase__: int =tokenizer.convert_tokens_to_string(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , "This is a test")
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] ={"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = "facebook/m2m100_418M"
lowercase_ = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
lowercase_ = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
lowercase_ = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr")
lowerCamelCase__: List[Any] =1
return cls
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar") , 128_006)
self.assertEqual(self.tokenizer.get_lang_id("en") , 128_022)
self.assertEqual(self.tokenizer.get_lang_id("ro") , 128_076)
self.assertEqual(self.tokenizer.get_lang_id("mr") , 128_063)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase_) , self.tokenizer.vocab_size)
self.assertEqual(vocab["<unk>"] , 3)
self.assertIn(self.tokenizer.get_lang_token("en") , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
lowerCamelCase__: str ="en"
lowerCamelCase__: int =self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids)
# fmt: off
lowerCamelCase__: Dict =[FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
lowerCamelCase__: int =self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Dict =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: int =tempfile.mkdtemp()
lowerCamelCase__: Any =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Tuple =MaMaaaTokenizer.from_pretrained(UpperCAmelCase_)
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase_)
@require_torch
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="en"
lowerCamelCase__: Any ="fr"
lowerCamelCase__: Optional[Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: List[str] =shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
lowerCamelCase__: Any =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Tuple ="mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
lowerCamelCase__: List[Any] ="zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Any ="mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
lowerCamelCase__: Tuple ="zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar")
self.assertEqual(
nested_simplify(UpperCAmelCase_) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
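# A hedged sketch of the language-code behaviour the tests above assert: the
# tokenizer (M2M100Tokenizer in the original source; the name is mangled to
# MaMaaaTokenizer in this dump) prefixes each sequence with a language token
# and suffixes it with </s>:
#
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     tok("A test").input_ids      # -> [128022, 58, 4183, 2] == [en code, "A", "test", EOS]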
| 10 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
lowerCamelCase__: Dict ={int(__a ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
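# A hedged, standalone sketch of the q/k/v split performed above: timm stores
# the attention projections as one fused (3 * hidden, hidden) matrix, and the
# conversion slices it into equal thirds for the separate HF query/key/value
# weights:
#
#     hidden = 4
#     in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#     q_w = in_proj_weight[:hidden, :]
#     k_w = in_proj_weight[hidden : hidden * 2, :]
#     v_w = in_proj_weight[-hidden:, :]
#     assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)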
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __a , __a , __a ) -> tuple[float, list[float]]:
"""simple docstring"""
lowerCamelCase__: Any =list(range(len(__a ) ) )
lowerCamelCase__: Optional[Any] =[v / w for v, w in zip(__a , __a )]
index.sort(key=lambda i: ratio[i] , reverse=True )
lowerCamelCase__: float =0
lowerCamelCase__: list[float] =[0] * len(__a )
for i in index:
if weight[i] <= capacity:
lowerCamelCase__: Optional[int] =1
max_value += value[i]
capacity -= weight[i]
else:
lowerCamelCase__: Optional[int] =capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
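# A hedged usage sketch of the fractional knapsack above (in the mangled
# source the function is named lowerCAmelCase_ and all three parameters appear
# as __a; the classic names are value, weight, capacity):
#
#     value, weight, capacity = [60, 100, 120], [10, 20, 30], 50
#     fractional_knapsack(value, weight, capacity)
#     # -> (240.0, [1, 1, 0.6666666666666666]): items 0 and 1 whole, 2/3 of item 2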
| 10 |
from math import ceil, sqrt
def lowerCAmelCase_ ( __a = 1000000 ) -> int:
"""simple docstring"""
lowerCamelCase__: Optional[int] =0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase__: Dict =max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase__: str =1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'{solution() = }')
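# A hedged sanity check for the lamina count above (Project Euler 173): a
# square lamina of outer width w with a centred hole of width h uses
# w*w - h*h tiles, and the loop counts the valid (w, h) pairs needing at most
# `limit` tiles. With limit=100 there are 41 such laminae, e.g. w=3, h=1 uses
# 8 tiles (the function name is mangled to lowerCAmelCase_ above):
#
#     solution(100)     # -> 41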
| 10 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Any=99 , UpperCAmelCase_ : Optional[int]=36 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[str]=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : int=6 , UpperCAmelCase_ : Dict=6 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Any]=1_000 , ) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =parent
lowerCamelCase__: Union[str, Any] =batch_size
lowerCamelCase__: Dict =num_channels
lowerCamelCase__: int =image_size
lowerCamelCase__: List[Any] =patch_size
lowerCamelCase__: Union[str, Any] =text_seq_length
lowerCamelCase__: str =is_training
lowerCamelCase__: Dict =use_input_mask
lowerCamelCase__: Optional[Any] =use_token_type_ids
lowerCamelCase__: List[str] =use_labels
lowerCamelCase__: int =vocab_size
lowerCamelCase__: Optional[Any] =hidden_size
lowerCamelCase__: Tuple =num_hidden_layers
lowerCamelCase__: Optional[Any] =num_attention_heads
lowerCamelCase__: Optional[int] =intermediate_size
lowerCamelCase__: Union[str, Any] =hidden_act
lowerCamelCase__: Union[str, Any] =hidden_dropout_prob
lowerCamelCase__: Dict =attention_probs_dropout_prob
lowerCamelCase__: Any =max_position_embeddings
lowerCamelCase__: Tuple =type_vocab_size
lowerCamelCase__: str =type_sequence_label_size
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: Optional[int] =coordinate_size
lowerCamelCase__: Any =shape_size
lowerCamelCase__: Optional[Any] =num_labels
lowerCamelCase__: Optional[int] =num_choices
lowerCamelCase__: int =scope
lowerCamelCase__: str =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__: str =text_seq_length
lowerCamelCase__: List[Any] =(image_size // patch_size) ** 2 + 1
lowerCamelCase__: List[Any] =self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
lowerCamelCase__: Union[str, Any] =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__: Dict =bbox[i, j, 3]
lowerCamelCase__: Union[str, Any] =bbox[i, j, 1]
lowerCamelCase__: str =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__: Tuple =bbox[i, j, 2]
lowerCamelCase__: Any =bbox[i, j, 0]
lowerCamelCase__: Optional[Any] =t
lowerCamelCase__: str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Union[str, Any] =None
if self.use_input_mask:
lowerCamelCase__: Optional[int] =random_attention_mask([self.batch_size, self.text_seq_length])
lowerCamelCase__: Any =None
if self.use_token_type_ids:
lowerCamelCase__: Any =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
lowerCamelCase__: str =None
lowerCamelCase__: List[Any] =None
if self.use_labels:
lowerCamelCase__: Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: Tuple =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
lowerCamelCase__: str =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =LayoutLMvaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# text + image
lowerCamelCase__: Optional[int] =model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_)
lowerCamelCase__: Tuple =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
lowerCamelCase__: Any =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_)
lowerCamelCase__: int =model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
lowerCamelCase__: str =model(UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
lowerCamelCase__: int =model(pixel_values=UpperCAmelCase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Any =self.num_labels
lowerCamelCase__: Optional[Any] =LayoutLMvaForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: int =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.num_labels
lowerCamelCase__: List[str] =LayoutLMvaForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Union[str, Any] =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Union[str, Any] =model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[Any] =config_and_inputs
lowerCamelCase__: List[str] ={
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =LayoutLMvaModelTester(self)
lowerCamelCase__: Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any]=False) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =copy.deepcopy(UpperCAmelCase_)
if model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] ={
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(UpperCAmelCase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: Optional[int] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in get_values(UpperCAmelCase_):
lowerCamelCase__: int =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
lowerCamelCase__: int =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in [
*get_values(UpperCAmelCase_),
]:
lowerCamelCase__: Union[str, Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
elif model_class in [
*get_values(UpperCAmelCase_),
]:
lowerCamelCase__: int =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
return inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__: Union[str, Any] =type
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: int =LayoutLMvaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase__: str =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.default_image_processor
lowerCamelCase__: List[Any] =prepare_img()
lowerCamelCase__: Union[str, Any] =image_processor(images=UpperCAmelCase_ , return_tensors="pt").pixel_values.to(UpperCAmelCase_)
lowerCamelCase__: Any =torch.tensor([[1, 2]])
lowerCamelCase__: str =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
lowerCamelCase__: Tuple =model(
input_ids=input_ids.to(UpperCAmelCase_) , bbox=bbox.to(UpperCAmelCase_) , pixel_values=pixel_values.to(UpperCAmelCase_) , )
# verify the logits
lowerCamelCase__: str =torch.Size((1, 199, 768))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
lowerCamelCase__: Dict =torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4))
| 10 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path / "cache"
lowerCamelCase__: Dict ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: str =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Optional[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[int] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: str ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: List[str] =ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path / "cache"
lowerCamelCase__: Any ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =features.copy() if features else default_expected_features
lowerCamelCase__: Union[str, Any] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: Union[str, Any] =ParquetDatasetReader({"train": parquet_path} , features=__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if split:
lowerCamelCase__: Union[str, Any] ={split: parquet_path}
else:
lowerCamelCase__: int ="train"
lowerCamelCase__: Union[str, Any] ={"train": parquet_path, "test": parquet_path}
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Union[str, Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Tuple =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Tuple =pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCamelCase__: Optional[int] =pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(shared_datadir / "test_image_rgb.jpg" )
lowerCamelCase__: Union[str, Any] ={"image": [image_path]}
lowerCamelCase__: int =Features({"image": Image()} )
lowerCamelCase__: Tuple =Dataset.from_dict(__a , features=__a )
lowerCamelCase__: Optional[int] =ParquetDatasetWriter(__a , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCamelCase__: Optional[Any] =Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__: List[str] =ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__a ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ ( __a , __a ) -> Any:
"""simple docstring"""
assert get_writer_batch_size(__a ) == expected
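A minimal round-trip sketch of the behavior these tests exercise, assuming only the public `datasets` API (Dataset.from_dict, to_parquet, from_parquet); the path and column names are illustrative, since ParquetDatasetReader/ParquetDatasetWriter are internal classes.
# Hedged sketch: round-trip a small table through Parquet with the public API.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
ds.to_parquet("/tmp/example.parquet")  # serializes the Arrow table to Parquet
reloaded = Dataset.from_parquet("/tmp/example.parquet")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]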
| 10 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =dataset
lowerCamelCase__: List[str] =process
lowerCamelCase__: Any =params
def __len__(self : Optional[int]) ->Any:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Any , UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.dataset[i]
lowerCamelCase__: int =self.process(UpperCAmelCase_ , **self.params)
return processed
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]=None) ->Dict:
'''simple docstring'''
lowerCamelCase__: int =loader
lowerCamelCase__: int =infer
lowerCamelCase__: List[str] =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
lowerCamelCase__: List[Any] =None
lowerCamelCase__: Tuple =loader_batch_size
# Internal bookkeeping
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: List[Any] =None
def __len__(self : Dict) ->Any:
'''simple docstring'''
return len(self.loader)
def __iter__(self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =iter(self.loader)
return self
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
lowerCamelCase__: Any =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
lowerCamelCase__: Optional[int] ={}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Convert ModelOutput to tuple first
lowerCamelCase__: int =element.to_tuple()
if isinstance(element[0] , torch.Tensor):
lowerCamelCase__: Dict =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCamelCase__: Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
lowerCamelCase__: Any =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
lowerCamelCase__: Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
lowerCamelCase__: List[str] =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
lowerCamelCase__: Optional[int] =element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
lowerCamelCase__: Dict =np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
lowerCamelCase__: List[str] =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
lowerCamelCase__: Optional[int] =self._loader_batch_data.__class__(UpperCAmelCase_)
self._loader_batch_index += 1
return result
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
lowerCamelCase__: int =next(self.iterator)
lowerCamelCase__: List[str] =self.infer(UpperCAmelCase_ , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase_ , torch.Tensor):
lowerCamelCase__: List[Any] =processed
else:
lowerCamelCase__: Optional[int] =list(processed.keys())[0]
lowerCamelCase__: Dict =processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Optional[int] =len(UpperCAmelCase_)
else:
lowerCamelCase__: str =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__: str =observed_batch_size
# Setting internal index to unwrap the batch
lowerCamelCase__: List[Any] =processed
lowerCamelCase__: List[Any] =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
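The loader_batch_item unrolling above re-slices one model batch into batch_size=1 items; a minimal sketch of that idea with plain tensors (names are illustrative, and real pipeline outputs are ModelOutput objects rather than bare dicts):
# Hedged sketch: slice a batched output dict into single-item "batches",
# keeping a leading batch dimension of 1 via unsqueeze, as loader_batch_item does.
import torch

batch = {"logits": torch.randn(4, 10)}  # pretend model output with batch_size=4
items = [
    {k: v[i].unsqueeze(0) for k, v in batch.items()}
    for i in range(batch["logits"].shape[0])
]
assert items[0]["logits"].shape == (1, 10)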
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=None) ->Dict:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __iter__(self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =iter(self.loader)
lowerCamelCase__: int =None
return self
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
if self.subiterator is None:
lowerCamelCase__: Dict =self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
lowerCamelCase__: Any =next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
lowerCamelCase__: Optional[int] =self.infer(next(self.iterator) , **self.params)
lowerCamelCase__: int =next(self.subiterator)
return processed
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __iter__(self : Dict) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =iter(self.loader)
return self
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =False
lowerCamelCase__: str =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
lowerCamelCase__: List[str] =self.loader_batch_item()
lowerCamelCase__: Union[str, Any] =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
if is_last:
return accumulator
while not is_last:
lowerCamelCase__: Optional[Any] =self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase_ , torch.Tensor):
lowerCamelCase__: Optional[int] =processed
else:
lowerCamelCase__: List[str] =list(processed.keys())[0]
lowerCamelCase__: Union[str, Any] =processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
else:
lowerCamelCase__: Any =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
lowerCamelCase__: int =observed_batch_size
lowerCamelCase__: Union[str, Any] =processed
lowerCamelCase__: Dict =0
while self._loader_batch_index < self.loader_batch_size:
lowerCamelCase__: Dict =self.loader_batch_item()
lowerCamelCase__: Tuple =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
if is_last:
return accumulator
else:
lowerCamelCase__: int =processed
lowerCamelCase__: int =item.pop("is_last")
accumulator.append(UpperCAmelCase_)
return accumulator
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int =dataset
lowerCamelCase__: int =key
def __len__(self : List[Any]) ->List[str]:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Dict , UpperCAmelCase_ : str) ->Union[str, Any]:
'''simple docstring'''
return self.dataset[i][self.key]
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =dataset
lowerCamelCase__: Union[str, Any] =keya
lowerCamelCase__: str =keya
def __len__(self : List[str]) ->int:
'''simple docstring'''
return len(self.dataset)
def __getitem__(self : Optional[Any] , UpperCAmelCase_ : Dict) ->List[Any]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 10 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> list:
"""simple docstring"""
lowerCamelCase__: Any =[]
lowerCamelCase__ , lowerCamelCase__: Any =input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowerCamelCase__: str =result + left + right
return input_list
def lowerCAmelCase_ ( __a ) -> list:
"""simple docstring"""
if len(__a ) <= 1:
return input_list
lowerCamelCase__: Any =list(__a )
# iteration for two-way merging
lowerCamelCase__: str =2
while p <= len(__a ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__a ) , __a ):
lowerCamelCase__: Dict =i
lowerCamelCase__: List[str] =i + p - 1
lowerCamelCase__: int =(low + high + 1) // 2
lowerCamelCase__: Optional[int] =merge(__a , __a , __a , __a )
# final merge of last two parts
if p * 2 >= len(__a ):
lowerCamelCase__: List[Any] =i
lowerCamelCase__: Optional[int] =merge(__a , 0 , __a , len(__a ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__A = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
__A = []
else:
__A = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
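A quick sanity check for the sort above, using the iter_merge_sort name that the script's own call site relies on:
# Hedged example: bottom-up merge sort handles duplicates, odd lengths and edge cases.
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert iter_merge_sort([1]) == [1]
assert iter_merge_sort([]) == []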
| 10 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Tuple , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(UpperCAmelCase_)
def __call__(self : Optional[int] , UpperCAmelCase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase_ : Union[str, List[str]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
if "text_queries" in kwargs:
lowerCamelCase__: Any =kwargs.pop("text_queries")
if isinstance(UpperCAmelCase_ , (str, Image.Image)):
lowerCamelCase__: List[Any] ={"image": image, "candidate_labels": candidate_labels}
else:
lowerCamelCase__: Any =image
lowerCamelCase__: Dict =super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
return results
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] ={}
if "threshold" in kwargs:
lowerCamelCase__: List[Any] =kwargs["threshold"]
if "top_k" in kwargs:
lowerCamelCase__: Any =kwargs["top_k"]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(inputs["image"])
lowerCamelCase__: Dict =inputs["candidate_labels"]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: Any =candidate_labels.split(",")
lowerCamelCase__: Optional[int] =torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(UpperCAmelCase_):
lowerCamelCase__: Dict =self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework)
lowerCamelCase__: Union[str, Any] =self.image_processor(UpperCAmelCase_ , return_tensors=self.framework)
yield {
"is_last": i == len(UpperCAmelCase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =model_inputs.pop("target_size")
lowerCamelCase__: Dict =model_inputs.pop("candidate_label")
lowerCamelCase__: Dict =model_inputs.pop("is_last")
lowerCamelCase__: Union[str, Any] =self.model(**UpperCAmelCase_)
lowerCamelCase__: Dict ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : str=None) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =[]
for model_output in model_outputs:
lowerCamelCase__: Optional[Any] =model_output["candidate_label"]
lowerCamelCase__: Tuple =BaseModelOutput(UpperCAmelCase_)
lowerCamelCase__: Dict =self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase_ , threshold=UpperCAmelCase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
lowerCamelCase__: Dict =outputs["scores"][index].item()
lowerCamelCase__: Dict =self._get_bounding_box(outputs["boxes"][index][0])
lowerCamelCase__: Optional[Any] ={"score": score, "label": label, "box": box}
results.append(UpperCAmelCase_)
        lowerCamelCase__: List[str] =sorted(UpperCAmelCase_ , key=lambda x: x["score"] , reverse=True)
if top_k:
lowerCamelCase__: Dict =results[:top_k]
return results
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : "torch.Tensor") ->Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =box.int().tolist()
lowerCamelCase__: Optional[int] ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
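A hedged usage sketch for this pipeline; the OWL-ViT checkpoint and the COCO image URL are illustrative assumptions, not mandated by the code above:
# Hedged sketch: detect arbitrary labels in an image with the zero-shot pipeline.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])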
| 10 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
lowerCamelCase__: Optional[int] =ksize + 1
lowerCamelCase__: Tuple =np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__a ):
for x in range(__a ):
# distance from center
lowerCamelCase__: Optional[int] =x - ksize // 2
lowerCamelCase__: str =y - ksize // 2
# degree to radiant
lowerCamelCase__: str =theta / 180 * np.pi
lowerCamelCase__: str =np.cos(_theta )
lowerCamelCase__: List[Any] =np.sin(_theta )
# get kernel x
lowerCamelCase__: int =cos_theta * px + sin_theta * py
# get kernel y
lowerCamelCase__: Optional[int] =-sin_theta * px + cos_theta * py
# fill kernel
lowerCamelCase__: Tuple =np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__A = imread("../image_data/lena.jpg")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__A = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__A = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__A = out / out.max() * 255
__A = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
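A small hedged check of the kernel-size handling, using the gabor_filter_kernel name from the call sites above: an even ksize is bumped to the next odd value, so requesting 10 yields an 11x11 kernel.
# Hedged example (assumes gabor_filter_kernel as defined above):
kernel = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
assert kernel.shape == (11, 11)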
| 10 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
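A hedged sketch of the custom-timesteps API these tests exercise; the values mirror the tests and are otherwise arbitrary:
# Hedged sketch: custom descending timesteps with DDPMParallelScheduler.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
print(scheduler.timesteps)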
| 10 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase_):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
lowerCamelCase__: List[Any] =copy.deepcopy(self)
lowerCamelCase__: Optional[int] =self.label_schema.copy()
lowerCamelCase__: int =features[self.label_column]
lowerCamelCase__: int =label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
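A hedged usage sketch for the template above; align_with_features is the de-obfuscated method name assumed from the upstream datasets source, and the label names are illustrative:
# Hedged sketch: bind concrete ClassLabel names into the image-classification template.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification().align_with_features(features)
print(task.label_schema)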
| 10 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: int =9, 14 # noqa: F841
lowerCamelCase__: List[Any] =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase__: List[str] =defaultdict(__a )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase__: List[str] =mst(__a )
lowerCamelCase__: Union[str, Any] =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase__: Optional[int] =tuple(answer[:2] )
lowerCamelCase__: List[Any] =tuple(edge[::-1] )
assert edge in result or reverse in result
| 10 | 1 |
import datasets
__A = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__A = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__A = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def lowerCAmelCase_ ( __a , __a ) -> List[str]:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]) ->Union[str, Any]:
'''simple docstring'''
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_)}
| 10 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 10 | 1 |