"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
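# Hedged usage sketch (added for illustration; not part of the original module).
# The GLUE helpers re-exported above are typically consumed as below; the data
# directory is an assumed local path and the checkpoint is just an example.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    from transformers.data.processors.glue import glue_convert_examples_to_features, glue_processors

    processor = glue_processors["mrpc"]()
    examples = processor.get_dev_examples("./glue_data/MRPC")  # assumed GLUE data location
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mrpc")
    print(f"built {len(features)} features")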
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
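# Hedged usage sketch (added for illustration, separate from the test class above):
# the zero-shot object detection pipeline exercised by these tests can be driven
# directly like this. The image URL is the COCO sample used in the slow tests; the
# default checkpoint resolved by the pipeline is whatever transformers ships.
if __name__ == "__main__":
    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )
    for pred in predictions:
        print(pred["label"], round(pred["score"], 4), pred["box"])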
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__snake_case ) - len(__snake_case ) + 1 ):
_UpperCamelCase = [x.match(__snake_case ) for x, y in zip(__snake_case, ks[i:] )]
if matches and all(__snake_case ):
return True
return False
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
def replace(__snake_case, __snake_case ):
for rule, replacement in rules:
if _match(__snake_case, __snake_case ):
return replacement
return val
return replace
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''', __snake_case )),
(("transformer", "wte", "embedding"), P('''mp''', __snake_case )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case, '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''', __snake_case )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__snake_case, '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''', __snake_case )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = _get_partition_rules()
_UpperCamelCase = _replacement_rules(__snake_case )
_UpperCamelCase = {k: _unmatched for k in flatten_dict(__snake_case )}
_UpperCamelCase = {k: replace(__snake_case, __snake_case ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__snake_case ) )
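# Hedged usage sketch (illustrative; the parameter tree below is a toy stand-in
# for a real flax model's params): set_partitions maps every flattened parameter
# key to a PartitionSpec via the rules above, or fails loudly if a key is unmatched.
if __name__ == "__main__":
    import numpy as np

    params = {
        "transformer": {
            "wte": {"embedding": np.zeros((50257, 768))},
            "wpe": {"embedding": np.zeros((1024, 768))},
        }
    }
    specs = set_partitions(params)  # raises AssertionError on any unmatched key
    print(specs)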
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a)
model.to(__a)
model.eval()
# create attention mask
_UpperCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a)
_UpperCamelCase = self.seq_length // 2
_UpperCamelCase = 0
# first forward pass
_UpperCamelCase , _UpperCamelCase = model(__a , attention_mask=__a).to_tuple()
        # create hypothetical next token and extend to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
_UpperCamelCase = ids_tensor((1,) , __a).item() + 1
_UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
_UpperCamelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCamelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a)] , dim=1 , )
# get two different outputs
_UpperCamelCase = model(__a , attention_mask=__a)['''last_hidden_state''']
_UpperCamelCase = model(__a , past_key_values=__a , attention_mask=__a)['''last_hidden_state''']
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a).to(__a).eval()
_UpperCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a)
# first forward pass
_UpperCamelCase = model(__a , attention_mask=__a , use_cache=__a)
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attention mask
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1)
_UpperCamelCase = model(__a , attention_mask=__a)['''last_hidden_state''']
_UpperCamelCase = model(__a , attention_mask=__a , past_key_values=__a)[
'''last_hidden_state'''
]
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a , __a=False) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM(__a)
model.to(__a)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def UpperCAmelCase ( self , __a , *__a) -> Any:
'''simple docstring'''
_UpperCamelCase = BioGptModel(__a)
_UpperCamelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = BioGptForTokenClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase = type
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__a)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = BioGptModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = '''multi_label_classification'''
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]])
_UpperCamelCase = model(__a)[0]
_UpperCamelCase = 4_23_84
_UpperCamelCase = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(__a)
torch.manual_seed(0)
_UpperCamelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(__a)
_UpperCamelCase = model.generate(
**__a , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__a , )
_UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__a)
_UpperCamelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__a , __a)
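# Hedged usage sketch (illustrative; mirrors the "microsoft/biogpt" checkpoint the
# slow tests above load): BioGPT can be driven through the text-generation pipeline
# for a quick smoke test.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="microsoft/biogpt")
    print(generator("COVID-19 is", max_length=40, num_return_sequences=1)[0]["generated_text"])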
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
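# Hedged usage sketch (illustrative): inside diffusers this pipeline is normally
# loaded from a pretrained checkpoint. The checkpoint name below is an assumption;
# any UNet2DModel/ScoreSdeVeScheduler pair saved in diffusers format would work.
if __name__ == "__main__":
    from diffusers import ScoreSdeVePipeline

    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
    image = pipe(num_inference_steps=10).images[0]  # few steps, just a smoke test
    image.save("sde_ve_sample.png")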
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def snake_case_ ( snake_case ) -> Optional[Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Union[str, Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowercase__: Optional[Any] = INVOICE_URL
lowercase__: int = 'How many cats are there?'
lowercase__: List[str] = [
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.0_0_0_1, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
lowercase__: Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
lowercase__: Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
        # No text is detected in this image, meaning layoutlmv2 should fail.
        # The answer will probably be empty.
lowercase__: str = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowercase__: Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase__: int = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowercase__: List[Any] = []
lowercase__: Optional[int] = []
lowercase__: Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[str] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowercase__: int = INVOICE_URL
lowercase__: str = 'What is the invoice number?'
lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Any = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowercase__: Optional[int] = INVOICE_URL
lowercase__: Union[str, Any] = 'What is the invoice number?'
lowercase__: Optional[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Dict = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Optional[Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ )
lowercase__: Optional[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , )
lowercase__: List[str] = INVOICE_URL
lowercase__: Union[str, Any] = 'What is the invoice number?'
lowercase__: Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowercase__: List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowercase__: int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
lowercase__: Any = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) )
# This model should also work if `image` is set to None
lowercase__: List[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ )
lowercase__: str = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , max_seq_len=50 , )
lowercase__: Optional[Any] = INVOICE_URL
lowercase__: Optional[Any] = 'What is the invoice number?'
lowercase__: Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Any = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
lowercase__: Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) )
# This model should also work if `image` is set to None
lowercase__: Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: List[Any] = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowercase__: int = INVOICE_URL
lowercase__: int = 'What is the invoice number?'
lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
pass
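# Hedged usage sketch (illustrative; the checkpoint mirrors one exercised by the
# slow tests above, and INVOICE_URL is the pinned invoice image defined earlier;
# running it requires pytesseract for OCR):
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))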
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
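# Hedged usage sketch (illustrative; the checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP defined above):
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer as HubSpeechT5Tokenizer

    tokenizer = HubSpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
    ids = tokenizer("Hello world").input_ids
    print(ids, "->", tokenizer.decode(ids))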
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph on the given number of vertices.

    Each possible edge is added with the given probability; if ``directed`` is
    False, edges are mirrored so the adjacency lists describe an undirected graph.
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)

    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph on the given vertices."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
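# Hedged usage example for the generators above (seeded for reproducibility; the
# random_graph output varies with the seed, complete_graph is deterministic):
if __name__ == "__main__":
    random.seed(0)
    print(random_graph(5, 0.5))  # adjacency lists for a random undirected graph
    print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}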
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE__ ( self ):
# enable deterministic behavior for gradient checkpointing
a , a :Any = self.prepare_init_args_and_inputs_for_common()
a :Dict = self.model_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
a :List[str] = model(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
a :Tuple = torch.randn_like(_lowerCamelCase )
a :List[str] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
a :Optional[Any] = self.model_class(**_lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
a :Optional[int] = model_a(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
a :List[Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
a :str = dict(model.named_parameters() )
a :Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :Optional[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_lowerCamelCase )
a :str = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
a :Optional[int] = model.to(_lowerCamelCase )
model.eval()
if torch_device == "mps":
a :List[str] = torch.manual_seed(0 )
else:
a :List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
a :Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a :List[Any] = image.to(_lowerCamelCase )
with torch.no_grad():
a :Tuple = model(_lowerCamelCase , sample_posterior=_lowerCamelCase , generator=_lowerCamelCase ).sample
a :Any = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
a :Optional[Any] = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
a :Tuple = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
a :Optional[int] = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Optional[int] = self.get_sd_vae_model()
a :str = self.get_sd_image(_lowerCamelCase )
a :Dict = self.get_generator(_lowerCamelCase )
with torch.no_grad():
a :int = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
a :List[str] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a :Optional[int] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :List[Any] = self.get_sd_vae_model(fpaa=_lowerCamelCase )
a :str = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase )
a :Any = self.get_generator(_lowerCamelCase )
with torch.no_grad():
a :int = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample
assert sample.shape == image.shape
a :Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a :int = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Optional[int] = self.get_sd_vae_model()
a :Optional[Any] = self.get_sd_image(_lowerCamelCase )
with torch.no_grad():
a :str = model(_lowerCamelCase ).sample
assert sample.shape == image.shape
a :Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a :Optional[int] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :int = self.get_sd_vae_model()
a :str = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
a :Union[str, Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
a :str = sample[-1, -2:, :2, -2:].flatten().cpu()
a :List[Any] = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :int = self.get_sd_vae_model(fpaa=_lowerCamelCase )
a :List[Any] = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )
with torch.no_grad():
a :List[Any] = model.decode(_lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
a :Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a :Any = torch.tensor(_lowerCamelCase )
assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
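# For reference, a minimal sketch of the `torch_all_close` helper the asserts above
# rely on; it is defined elsewhere in the test utilities, and the error message
# formatting below is an assumption:
import torch

def torch_all_close(a, b, *args, **kwargs):
    if not torch.allclose(a, b, *args, **kwargs):
        raise AssertionError(f"Tensors are not close, max diff: {(a - b).abs().max()}")
    return True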
| 94 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
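# A self-contained illustration of the greedy longest-match behaviour exercised
# above (illustrative only, not part of the test suite):
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab = {t: i for i, t in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(tokenizer.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']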
| 0 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with flat `content` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None)

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested `a.b` examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None)

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
@require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
@require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
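# A small standalone sketch of the `pipeline | "Load Examples" >> beam.Create(...)`
# pattern used by the builders above (requires apache-beam to be installed):
import apache_beam as beam

with beam.Pipeline() as pipeline:
    examples = pipeline | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
    examples | beam.Map(print)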
| 226 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
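# A minimal sketch of the apply_ocr toggle exercised above; the public class name
# in transformers is assumed to be LayoutLMv3ImageProcessor (requires torch, and
# pytesseract only when apply_ocr=True):
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # pixel values only, no Tesseract
image = Image.new("RGB", (640, 480), color="white")
outputs = processor(image, return_tensors="pt")
print(outputs.pixel_values.shape)  # torch.Size([1, 3, 224, 224])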
| 226 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
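# A rough usage sketch of the integration path above (downloads weights from the
# Hub; requires flax and jax to be installed):
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000)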
| 110 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
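# A quick illustration of add_sub_symbol (assumes the functions above are in scope):
# non-initial characters of a matched whole word receive the "##" prefix.
demo_tokens = ["[CLS]", "中", "国", "人", "[SEP]"]
print(add_sub_symbol(demo_tokens, {"中国"}))  # ['[CLS]', '中', '##国', '人', '[SEP]']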
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 313 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that logs on the main process only, unless told otherwise."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
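# A minimal usage sketch of the adapter above; the accelerate state must be
# initialized first or `log` raises the RuntimeError shown earlier:
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed on every process", main_process_only=False)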
| 276 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # Pad the tensor on the main process with one extra element so the others must be padded
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now this only runs on 2 processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now this only runs on 2 processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
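# Standalone single-process sanity check mirroring test_gather above; with one
# process, gather is effectively the identity:
import torch
from accelerate import PartialState
from accelerate.utils.operations import gather

state = PartialState()
tensor = (torch.arange(state.num_processes) + 1.0).to(state.device)
assert gather(tensor).tolist() == list(range(1, state.num_processes + 1))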
| 276 | 1 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
    def forward(self, W_query, W_supports):
        """Find scores of each token being start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
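# Shape-level sketch of the start/end scoring step in forward() above, with
# hypothetical tensors standing in for BERT outputs:
import torch

q = torch.randn(2, 5, 768)     # query token embeddings: (batch, query_len, hidden)
s_start = torch.randn(3, 768)  # support-set start-token embeddings
p_start = torch.matmul(q[0], s_start.T).sum(1).softmax(0)
print(p_start.shape)  # torch.Size([5]), a distribution over query token positions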
| 280 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
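# A quick usage sketch (downloads the SentencePiece model from the Hub):
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tokenizer("Transformers est génial !").input_ids
print(ids[0], ids[-1])  # 0 2: <s> and </s> are added around the sequence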
| 153 | 0 |
import functools
def mincost_tickets(days, costs):
    """Minimum cost to travel on the given days with 1-day, 7-day and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
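# Worked example, the classic instance of this problem: a 1-day pass on day 1,
# a 7-day pass covering days 4-8, and a 1-day pass on day 20 cost 2 + 7 + 2 = 11.
print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11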
| 370 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 318 | 0 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2}
lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCAmelCase__ , {'a.1': 0, 'a.2': 2} )
lowerCAmelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCAmelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
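

# A minimal round-trip sketch of the helpers exercised above, runnable outside
# unittest; it only uses the accelerate.utils API imported at the top of this file.
if __name__ == "__main__":
    demo_model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        # serialize every tensor to `<key>.dat` plus an index.json holding shapes/dtypes
        offload_state_dict(tmp_dir, demo_model.state_dict())
        # map the offloaded tensors back lazily, without loading them all at once
        weight_map = OffloadedWeightsLoader(save_folder=tmp_dir)
        print(sorted(weight_map))  # ['batchnorm.bias', ..., 'linear1.weight', 'linear2.weight']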
| 4 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the active processor. Deprecated."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence like `<s_key>value</s_key>` into nested JSON."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
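

# Hedged usage sketch for `token2json` above: it turns a Donut-style generated
# tag sequence into nested JSON. `processor` stands in for an instance of the
# class above built from a real checkpoint.
#
#   sequence = "<s_menu><s_name>Latte</s_name><s_price>5</s_price></s_menu>"
#   processor.token2json(sequence, added_vocab=[])
#   # -> {"menu": {"name": "Latte", "price": "5"}}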
| 64 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
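

# A quick check of the patch-embedding arithmetic used in `create_and_check_model`:
# each of the four stem convolutions maps a spatial size H to
# floor((H + 2 * padding - kernel_size) / stride + 1). With the defaults above
# (image_size=64, kernel_size=3, stride=2, padding=1):
#
#   size = 64
#   for _ in range(4):
#       size = floor((size + 2 * 1 - 3) / 2 + 1)  # 32, 16, 8, 4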
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    ((height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1
                )
                width = floor(
                    ((width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride)
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
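

# Hedged sketch of the inference path the integration test above exercises; the
# checkpoint name assumes LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] is "facebook/levit-128S":
#
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   model = LevitForImageClassificationWithTeacher.from_pretrained("facebook/levit-128S")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, 1000), ImageNet classes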
| 263 |
"""simple docstring"""
def A ( snake_case :int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
UpperCamelCase : Union[str, Any] = int(input("Enter number: ").strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
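
# Sanity check against the first few known perfect numbers:
#   assert all(perfect(n) for n in (6, 28, 496, 8128))
#   assert not any(perfect(n) for n in (2, 12, 100))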
| 263 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # assumed reconstruction: silences TensorFlow's C++ logging (uses the `os` import above)
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 160 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_luke'''] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='max_length',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
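

# Hedged sketch of the input this loader expects: one CSV per split, where one
# column holds the label and the remaining one or two columns hold the text, e.g.
#
#   sentence,label
#   "the movie was great",pos
#   "terrible pacing",neg
#
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file=None,
#       tokenizer=tokenizer, label_column_id=1, max_seq_length=128,
#   )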
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='text-classification',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('.bin' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {'acc': (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')

        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')

            for key, value in result.items():
                logger.info(F"""  {key} = {value}""")
                writer.write(F"""{key} = {value}\n""")

            results.update(result)

    return results
if __name__ == "__main__":
main()
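

# Hypothetical invocation; the flag names come from the dataclass fields parsed
# by HfArgumentParser above:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./text-classifier --do_train --do_eval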
| 249 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Any = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
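

# A minimal sketch of the lazy-import mechanism the `_LazyModule` __init__ files
# above rely on; this is not the real transformers implementation, just the idea:
#
#   import importlib
#   import types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_submodule = {
#               attr: sub for sub, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           # import the heavy submodule only on first attribute access
#           module = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
#           return getattr(module, attr)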
| 249 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)
@property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model
@property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }
@property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
@property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
UpperCamelCase_ = """cpu"""
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = pipe(**__UpperCamelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase_ = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
UpperCamelCase_ = """cpu"""
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = pipe(**__UpperCamelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase_ = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
UpperCamelCase_ = """cpu"""
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
UpperCamelCase_ = pipe(**__UpperCamelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
UpperCamelCase_ = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
UpperCamelCase_ = image[0, -3:, -3:, -1]
UpperCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
UpperCamelCase_ = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed(self):
UpperCamelCase_ = torch.device("""cpu""" )
        class DummyScheduler:
            init_noise_sigma = 1
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
UpperCamelCase_ = pipe.decoder.dtype
UpperCamelCase_ = 1
UpperCamelCase_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCamelCase_ = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
UpperCamelCase_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCamelCase_ = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
UpperCamelCase_ = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase ).images
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
UpperCamelCase_ = pipeline_inputs.pop("""image""" )
UpperCamelCase_ = pipe.image_encoder(__UpperCamelCase ).image_embeds
UpperCamelCase_ = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase , image_embeddings=__UpperCamelCase , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )
    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
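

# Hedged sketch of the user-facing call the integration test above exercises
# (same checkpoint and conditioning image; "cuda" assumed available):
#
#   pipe = UnCLIPImageVariationPipeline.from_pretrained(
#       "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
#   ).to("cuda")
#   cat = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
#   )
#   variations = pipe(cat, generator=torch.Generator("cpu").manual_seed(0), output_type="np").images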
| 122 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
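

# Hedged usage sketch, assuming the public "microsoft/layoutxlm-base" checkpoint
# and a local "document.png" (both illustrative):
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   sorted(encoding.keys())  # ['attention_mask', 'bbox', 'image', 'input_ids']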
| 122 | 1 |
import qiskit


def half_adder(bit0: int, bit1: int):
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
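

# Ideal (noise-free) outcomes for every input pair; the readout string is
# "<carry><sum>", i.e. classical bit 1 (AND) followed by classical bit 0 (XOR):
#
#   half_adder(0, 0) -> {'00': 1000}
#   half_adder(0, 1) -> {'01': 1000}
#   half_adder(1, 0) -> {'01': 1000}
#   half_adder(1, 1) -> {'10': 1000}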
| 359 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2,
        post_attention_groups=2, intermediate_groups=4, output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 27 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
'''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 204 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    '''simple docstring'''
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        """simple docstring"""
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
@slow
    def test_generation_from_long_input(self):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 204 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
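# For reference, the quantity computed in `_compute` below is the standard definition
#     PPL(X) = exp( -(1/t) * sum_{i=1..t} log p(x_i | x_{<i}) ),
# i.e. the exponentiated mean token-level negative log-likelihood of each sequence.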
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss returns natural-log NLL, so perplexity is exp of the mean loss
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 363 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 73 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes() -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 65 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 65 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 350 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
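# Illustrative trace on [3, 1, 2]: the even-indexed pass swaps 3 and 1 -> [1, 3, 2], the
# odd-indexed pass swaps 3 and 2 -> [1, 2, 3], and the next sweep performs no swap, so
# `is_sorted` stays True and the loop terminates with the sorted list.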
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 156 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 88 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 88 | 1 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    '''simple docstring'''
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
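# Why the log form works: p**q * q**p <= base**degree is equivalent (taking log2 of both
# sides) to q*log2(p) + p*log2(q) <= degree*log2(base), so `upper_bound` lets the
# two-pointer scan above compare candidate prime pairs without ever materialising the
# astronomically large products themselves.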
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
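# Classic worked example: jaro_winkler("martha", "marhta") finds 6 matched characters and
# 1 transposition, so jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; with the common prefix "mar"
# (length 3) the final score is 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.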
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 226 | 1 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
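# Worked 2x2 example of the Doolittle scheme implemented above:
#   lower_upper_decomposition(np.array([[4, 3], [6, 3]]))
# yields lower = [[1, 0], [1.5, 1]] and upper = [[4, 3], [0, -1.5]], and indeed
# [[1, 0], [1.5, 1]] @ [[4, 3], [0, -1.5]] reproduces [[4, 3], [6, 3]].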
if __name__ == "__main__":
import doctest
doctest.testmod()
| 230 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """simple docstring"""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
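    # Padding arithmetic, illustrated: with size=8 and a 21x17 image,
    # pad_height = (21 // 8 + 1) * 8 - 21 = 3 and pad_width = (17 // 8 + 1) * 8 - 17 = 7,
    # giving a 24x24 output. Note that a dimension already divisible by `size` still gains
    # a full extra block (e.g. 16 -> 24), which is exactly what the formula above computes.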
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 121 | 0 |
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """simple docstring"""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """simple docstring"""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1

    return False
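# Example behaviour of the binary-search variant: perfect_square_binary_search(16) -> True
# (mid reaches 4 with 4**2 == 16), while perfect_square_binary_search(14) -> False once the
# search interval empties. The sqrt-based check above relies on floating point and can
# misbehave for very large integers, where the exact integer search is the safer choice.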
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
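# Examples: mode([2, 2, 3]) -> [2]; mode([1, 2, 2, 1]) -> [1, 2] (all tied modes are
# returned, in sorted order).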
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
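# `ucal(u, k)` is the falling product u*(u-1)*...*(u-k+1) from Newton's forward-difference
# formula f(x0 + u*h) ~= y0 + u*dy0 + u*(u-1)/2! * d^2 y0 + ...; `main` below supplies the
# k! divisor via math.factorial when summing the series.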
def main() -> None:
    """simple docstring"""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
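# This implements Gamma(z) = integral from 0 to inf of x**(z-1) * e**(-x) dx, so for
# positive integers Gamma(n) = (n-1)!; e.g. gamma(5) ~= 24.0 (up to quadrature error).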
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def get_artifacts_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    '''simple docstring'''
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    '''simple docstring'''
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    '''simple docstring'''
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    '''simple docstring'''
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]

            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCAmelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase = reduce_by_error(errors)
__UpperCAmelCase = reduce_by_model(errors)
__UpperCAmelCase = make_github_table(reduced_by_error)
__UpperCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
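
# Illustrative sketch (not part of the original script; the helper name and toy
# data are hypothetical) showing the dict shape `make_github_table` expects and
# the GitHub-flavored markdown it produces:
def _demo_make_github_table():
    toy_reduced_by_error = {
        "ImportError: cannot import name 'X'": {"count": 3, "failed_tests": []},
        "AssertionError: Tensors are not close": {"count": 1, "failed_tests": []},
    }
    # | no. | error | status |
    # |-:|:-|:-|
    # | 3 | ImportError: cannot import name 'X' | |
    # | 1 | AssertionError: Tensors are not close | |
    return make_github_table(toy_reduced_by_error)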
| 28 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
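
# A minimal numpy-only sketch (helper name hypothetical, not part of the test
# suite) of the position-id bookkeeping used by the cache tests above: the
# prompt minus its final token is scored with positions 0..seq_len-2, then the
# last token is fed alone with position seq_len-1 against the primed cache.
def _demo_cache_position_ids(batch_size=2, seq_len=5):
    prompt_position_ids = np.broadcast_to(np.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1))
    last_position_ids = np.array(batch_size * [[seq_len - 1]])
    return prompt_position_ids.shape, last_position_ids.shape  # ((2, 4), (2, 1))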
| 28 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
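
# Informal behavior sketch for the "no encoding" regex above (illustrative
# comments only, not part of the original test module):
#
#   pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
#   pattern.search("with open('data.txt') as f:")                    # -> a match: flagged
#   pattern.search("with open('data.txt', encoding='utf-8') as f:")  # -> None: accepted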
| 94 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted copy of the word's letters, used as an anagram key."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known word sharing `my_word`'s signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('''anagrams.txt''', '''w''') as file:
        file.write('''all_anagrams = \n ''')
        file.write(pprint.pformat(all_anagrams))
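    # Tiny illustration of the signature trick on an inline sample (variable
    # names here are hypothetical), independent of words.txt:
    sample_words = ["dog", "god", "cat", "act", "tac"]
    sample_anagrams = collections.defaultdict(list)
    for sample_word in sample_words:
        sample_anagrams[signature(sample_word)].append(sample_word)
    print({k: v for k, v in sample_anagrams.items() if len(v) > 1})
    # {'dgo': ['dog', 'god'], 'act': ['cat', 'act', 'tac']}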
| 94 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
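
# A minimal sketch of the scorer this metric wraps (assumes the `rouge_score`
# package is installed; the helper name below is hypothetical, for illustration
# only). `scorer.score(reference, prediction)` returns one Score tuple
# (precision, recall, fmeasure) per requested rouge type.
def _example_direct_rouge_usage():
    scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
    return scorer.score("hello there", "hello there")["rouge1"].fmeasure  # 1.0 for identical texts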
| 368 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize_and_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'crop_pct'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 280 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    r"""Image processor for DPT."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits'
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
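
# Standalone worked example (hypothetical helper, not part of the processor) of
# the snapping rule in `constraint_to_multiple_of` above: values round to the
# nearest multiple, falling back to flooring when rounding would exceed max_val.
def _demo_constraint_to_multiple_of():
    val, multiple, max_val = 383, 32, 352
    x = round(val / multiple) * multiple  # 12 * 32 == 384
    if x > max_val:
        x = math.floor(val / multiple) * multiple  # 11 * 32 == 352
    return x  # 352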
| 29 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}


class BertAbsConfig(PretrainedConfig):
    r"""Class to store the configuration of the BertAbs model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
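
# Hypothetical quick check of the config above (illustration only, not part of
# the original research module):
def _demo_bertabs_config():
    config = BertAbsConfig(enc_layers=4, dec_layers=4)
    return (config.enc_layers, config.dec_layers)  # (4, 4)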
| 235 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
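
# Worked example for `inputs_to_logits_ratio` above (helper name hypothetical):
# the default conv strides (5, 2, 2, 2, 2, 2, 2) compound to 5 * 2**6 == 320,
# i.e. one output frame per 320 input waveform samples.
def _demo_inputs_to_logits_ratio():
    return functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # 320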
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
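
# Quick sketch (hypothetical helper) of the `attribute_map` declared above:
# reading `max_position_embeddings` transparently resolves to `context_length`.
def _demo_rwkv_attribute_map():
    config = RwkvConfig(context_length=2048)
    return config.max_position_embeddings  # 2048, served via the attribute_map alias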
| 333 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
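    # Worked example on the classic [3, 0, 0] tree: the root holds all three
    # coins, so one coin travels down each of the two edges -> 2 moves.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(example_root))  # 2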
| 222 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
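
# Worked example: a 2 x 2 grid has C(4, 2) == 6 lattice paths, so solution(2)
# must return 6.
assert solution(2) == 6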
| 91 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Memoized recursive factorial; raises ValueError for negative input."""
    if num < 0:
        raise ValueError('''Number should not be negative.''')
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
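
# Usage sketch: with functools.lru_cache, repeated calls reuse earlier results,
# so factorial(10) after factorial(5) only computes the new frames 6..10.
#
#   >>> factorial(5)
#   120
#   >>> factorial(10)
#   3628800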
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )

        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )

        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''embedding_proj_norm_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }

        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )

        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''image_processor''': image_processor,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e(self):
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''')
        pipe = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 6 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ))

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
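

# A minimal usage sketch (not part of the original module; it assumes at least
# one backend package, e.g. ``optuna``, is installed):
#
#     name = default_hp_search_backend()                        # e.g. "optuna"
#     backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
#     backend.ensure_available()  # raises RuntimeError with an install hint if missing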
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
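

# For example (an illustrative sketch of the behaviour on a hypothetical README):
#
#     >>> _split_yaml_from_readme("---\npretty_name: Demo\n---\n# Title")
#     ('pretty_name: Demo', '# Title')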
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
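

# A few worked examples (kept as comments so the CLI below is unaffected):
#
#     >>> pollard_rho(100)   # even inputs are handled by the parity shortcut
#     2
#     >>> pollard_rho(8051)  # classic textbook case: 8051 = 83 * 97
#     97
#     >>> pollard_rho(17)    # primes yield no nontrivial divisor, so None
#
# The factor 97 for 8051 follows from the default ``seed=2`` and ``step=1``;
# other seeds may surface the cofactor 83 instead.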
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"""{args.num} is probably prime""")
    else:
        quotient = args.num // divisor
        print(f"""{args.num} = {divisor} * {quotient}""")
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
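

# A minimal usage sketch (not part of the original module; the data directory
# below is hypothetical and must contain SQuAD-style .json files):
#
#     from transformers import AutoTokenizer
#
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(data_args, tokenizer, mode="train")
#     item = train_dataset[0]  # dict with input_ids, attention_mask, start/end positions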
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
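
# Example invocation (a sketch; the script name and both paths are hypothetical):
#
#     python convert_speech_to_text_fairseq_to_hf.py \
#         --fairseq_path ./s2t_checkpoint.pt \
#         --pytorch_dump_folder_path ./s2t-hf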
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
    't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
    't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
    't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
    't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
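

# Illustrative sketch (not part of the original module): the activation parsing
# above means a config built with ``feed_forward_proj="gated-gelu"`` ends up
# with a gated activation and the backwards-compatible ``gelu_new`` function:
#
#     config = T5Config(feed_forward_proj="gated-gelu")
#     assert config.is_gated_act and config.dense_act_fn == "gelu_new"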
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a low-pass filter (RBJ Audio EQ Cookbook biquad coefficients).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a high-pass filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a band-pass filter (constant skirt gain).
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates an all-pass filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a peaking EQ filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a low-shelf filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a high-shelf filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
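

if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module). It assumes the
    # companion ``IIRFilter`` class exposes a per-sample ``process`` method, as
    # in the accompanying ``audio_filters.iir_filter`` implementation.
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    filtered = [lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]
    print(filtered)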
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
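

# Shape sketch (illustrative numbers only): with ``hidden_size = 4`` the fused
# timm qkv weight has shape (12, 4); rows 0-3 become the query projection,
# rows 4-7 the key projection and rows 8-11 the value projection, and the
# fused bias vector is split the same way.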
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
        return re.sub(regex, ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
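

# For example (an illustrative sketch): punctuation, articles and casing are
# normalized away, so the prediction below counts as an exact match:
#
#     >>> compute_em(predictions=["The cat."], references=[["the cat"]])
#     100.0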
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('''Sources length must match predictions and references lengths.''')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase="exp" , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False , ) -> List[str]:
"""simple docstring"""
_snake_case = len(references[0] )
if any(len(_UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_snake_case = [[refs[i] for refs in references] for i in range(_UpperCamelCase )]
_snake_case = sacrebleu.corpus_bleu(
_UpperCamelCase , _UpperCamelCase , smooth_method=_UpperCamelCase , smooth_value=_UpperCamelCase , force=_UpperCamelCase , lowercase=_UpperCamelCase , use_effective_order=_UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'''exact''': compute_em(predictions=predictions, references=references)})
        return result
from math import factorial
def combinations(n: int, k: int) -> int:
    """Returns the number of ways to choose ``k`` items from ``n`` items (n choose k)."""
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
'''simple docstring'''
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """Return True if every element of ``input_list`` occurs exactly once."""
    return len(set(input_list)) == len(input_list)
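

# For example (an illustrative sketch):
#
#     >>> all_unique([1, 2, 3])
#     True
#     >>> all_unique([1, 2, 2])
#     False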
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class TaTokenizerFast (PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__(self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda token : bool('''extra_id_''' in str(token ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
@staticmethod
    def _eventually_correct_ta_max_length(pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
        return max_model_length
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(f"Copy vocab file to {out_vocab_file}" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens(self ):
        '''simple docstring'''
        return list(
            set(filter(lambda token : bool(re.search(R'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids(self ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
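# Hedged usage sketch (added for illustration, not part of the original module); it assumes
# network access to the "t5-small" checkpoint listed above and the `tokenizers` backend.
if __name__ == "__main__":
    tokenizer = TaTokenizerFast.from_pretrained('''t5-small''')
    encoded = tokenizer('''Translate English to German: Hello!''')
    # build_inputs_with_special_tokens above appends the </s> (eos) id at the end
    print(encoded['''input_ids'''])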
| 2 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCAmelCase__ : Union[str, Any] =logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 359 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
    '''tokenizer_file''': {
        '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mobilebert-uncased''': 5_12}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
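# Hedged usage sketch (added for illustration, not part of the original module); it assumes
# network access to the "google/mobilebert-uncased" checkpoint referenced above.
if __name__ == "__main__":
    tokenizer = MobileBertTokenizerFast.from_pretrained("""google/mobilebert-uncased""")
    # build_inputs_with_special_tokens above wraps the ids as [CLS] ... [SEP]
    print(tokenizer("""hello world""")["""input_ids"""])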
| 262 | 0 |
def binomial_coefficient(n: int , r: int ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
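# Added sanity check: the Pascal's-triangle DP above should agree with the closed form
# n! / (r! * (n - r)!) provided by the standard library.
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252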
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=idalabel , label2id=labelaid , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key( name ):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
# fmt: off
__lowercase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='pt' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.id2label[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(F"""{model_name}""" )
        processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__magic_name__: Union[str, Any] = {"facebook/blenderbot_small-90M": 512}
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ) -> None:
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            merges = merges_handle.read().split("""\n""" )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )

    def get_vocab( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("""([.,!?()])""" , R""" \1""" , token )
        token = re.sub("""(')""" , R""" \1 """ , token )
        token = re.sub(R"""\s{2,}""" , """ """ , token )
        if "\n" in token:
            token = token.replace("""\n""" , """ __newln__""" )
        tokens = token.split(""" """ )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = """@@ """.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self , text: str ) -> List[str]:
        split_tokens = []
        words = re.findall(R"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens

    def _convert_token_to_id( self , token: str ) -> int:
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index: int ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
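# Added self-contained check of the BPE helper above: get_pairs returns the set of adjacent
# symbol pairs that the merge loop in `bpe` ranks against self.bpe_ranks.
if __name__ == "__main__":
    print(get_pairs(("h", "e", "l", "l", "o</w>")))
    # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}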
| 138 |
from manim import *
class snake_case__ ( Scene ):
    def construct( self ) -> None:
__magic_name__ : int = Rectangle(height=0.5 , width=0.5 )
__magic_name__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
__magic_name__ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__magic_name__ : List[Any] = [mem.copy() for i in range(6 )]
__magic_name__ : int = [mem.copy() for i in range(6 )]
__magic_name__ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Union[str, Any] = Text("""CPU""" , font_size=24 )
__magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Any = [mem.copy() for i in range(4 )]
__magic_name__ : List[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Tuple = Text("""GPU""" , font_size=24 )
__magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = [mem.copy() for i in range(6 )]
__magic_name__ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = Text("""Model""" , font_size=24 )
__magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : str = []
__magic_name__ : Tuple = []
__magic_name__ : Union[str, Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [mem.copy() for i in range(6 )]
__magic_name__ : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Any = Text("""Loaded Checkpoint""" , font_size=24 )
__magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Optional[int] = []
__magic_name__ : Tuple = []
for i, rect in enumerate(lowerCAmelCase__ ):
__magic_name__ : Dict = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
ckpt_arr.append(lowerCAmelCase__ )
__magic_name__ : int = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
__magic_name__ : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__magic_name__ : str = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Any = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__magic_name__ : int = [meta_mem.copy() for i in range(6 )]
__magic_name__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
__magic_name__ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Tuple = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : int = Text("""Disk""" , font_size=24 )
__magic_name__ : Union[str, Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
__magic_name__ : List[Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
__magic_name__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(FadeOut(lowerCAmelCase__ ) )
__magic_name__ : str = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , )
self.wait()
| 138 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        '''simple docstring'''
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification( self , config , pixel_values ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_jit_compilation( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values , **kwargs):
                    return model(pixel_values=pixel_values , **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/vit-base-patch16-224''')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 14 |
"""simple docstring"""
class Node :
    """simple docstring"""

    def __init__( self , data: int , previous=None , next_node=None ) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ) -> str:
        return f"""{self.data}"""

    def get_data( self ) -> int:
        return self.data

    def get_next( self ):
        return self.next

    def get_previous( self ):
        return self.previous


class LinkedListIterator :
    """simple docstring"""

    def __init__( self , head ) -> None:
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList :
    """simple docstring"""

    def __init__( self ) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value: int ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head( self , node: Node ) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node: Node ) -> None:
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value: int ) -> None:
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node( self , node: Node , node_to_insert: Node ) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node: Node , node_to_insert: Node ) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position: int , value: int ) -> None:
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item: int ) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found' )

    def delete_value( self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node: Node ) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ):
        return self.head is None
def _lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
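    # Added demo of the doubly linked list above (illustrative, not in the original):
    demo = LinkedList()
    for value in (1, 2, 3):
        demo.insert(value)
    print(demo)  # 1 2 3
    demo.delete_value(2)
    print(list(demo))  # [1, 3]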
| 78 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
A_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder ( nn.Module ):
    def __init__( self , args ):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward( self , input_modal ):
        '''simple docstring'''
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset ( Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        '''simple docstring'''
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.data )
    def __getitem__( self , index ):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["""label"""]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies( self ):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""] )
        return label_freqs
def collate_fn( batch ):
    lens = [len(row["""sentence"""] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["""sentence"""]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["""image"""] for row in batch] )
    tgt_tensor = torch.stack([row["""label"""] for row in batch] )
    img_start_token = torch.stack([row["""image_start_token"""] for row in batch] )
    img_end_token = torch.stack([row["""image_end_token"""] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms( ):
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017], std=[0.1222_1994, 0.1214_5835, 0.1438_0469], ),
] )
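# Hedged usage note (added; the variable names below are illustrative assumptions, not from
# the original script): the dataset and collate_fn above are meant to be wired together as
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)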
| 296 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest ( nn.Module ):
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester ( unittest.TestCase ):
    def test_offload_state_dict( self ):
        '''simple docstring'''
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , """index.json""" )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , F'{key}.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        '''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , """weight""" , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , """weight.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {"""weight""": {"""shape""": [2, 3], """dtype""": str(dtype ).split(""".""" )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index["""weight"""] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        '''simple docstring'''
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if """linear2""" not in k}
        disk_part = {k: v for k, v in state_dict.items() if """linear2""" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if """weight""" in k}
        disk_part = {k: v for k, v in state_dict.items() if """weight""" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=state_dict , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        '''simple docstring'''
        state_dict = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1""": 0, """a.2""": 2} )
        state_dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1.a""": 0, """a.2.a""": 2} )
| 296 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer ( tf.keras.layers.Layer ):
    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def from_tokenizer( cls , tokenizer: GPTaTokenizer , *args , **kwargs ):
        merges = [''' '''.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def from_config( cls , config: Dict ):
        return cls(**config )
    def get_config( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call( self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 103 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : List[str] = logging.get_logger(__name__)
class MaskFormerSwinConfig ( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''maskformer-swin'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
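# Minimal usage sketch (added for illustration; values below are the defaults defined above):
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage1", "stage4"])
    print(config.stage_names)
    print(config.out_features, config.out_indices)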
| 103 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass ( KwargsHandler ):
    '''simple docstring'''
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester ( unittest.TestCase ):
    '''simple docstring'''
    def test_kwargs_handler( self ):
        """simple docstring"""
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
    @require_cuda
    def test_grad_scaler_kwargs( self ):
        """simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled , True )
    @require_multi_gpu
    def test_ddp_kwargs( self ):
        """simple docstring"""
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 355 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack ( list ):
    '''simple docstring'''
    def __lt__( self , other ):
        """simple docstring"""
        return self[-1] < other[-1]

    def __eq__( self , other ):
        """simple docstring"""
        return self[-1] == other[-1]
def patience_sort( collection: list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 301 | 0 |
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase = {}
_UpperCAmelCase = source_vertex
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {self.source_vertex}
_UpperCAmelCase = None
_UpperCAmelCase = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCAmelCase )
_UpperCAmelCase = vertex
queue.append(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase = self.parent.get(UpperCAmelCase )
if target_vertex_parent is None:
_UpperCAmelCase = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(UpperCAmelCase )
return self.shortest_path(UpperCAmelCase ) + F"""->{target_vertex}"""
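# Hand-traced expectations (added) for the demo below, with "G" as the source:
#   g.shortest_path("D")   == "G->C->A->B->D"
#   g.shortest_path("G")   == "G"
#   g.shortest_path("Foo") raises ValueError, since "Foo" never enters the
#   breadth-first tree.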
if __name__ == "__main__":
_a = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 39 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
def solution( n: int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
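# Worked check (added): the chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 ->
# 8 -> 4 -> 2 -> 1, i.e. 10 terms, so counters[13] == 10 after the first visit;
# this memo dict is what keeps solution(1_000_000) tractable.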
if __name__ == "__main__":
print(solution(int(input().strip())))
| 354 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : Tuple = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowercase )
return [m.group(0 ) for m in matches]
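# Example (added): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"];
# the lookbehind/lookahead pairs split on a lower->upper boundary and on an
# uppercase run followed by a capitalized word.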
def get_frameworks_table( ):
'''simple docstring'''
lowerCamelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCamelCase_ = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase ):
lowerCamelCase_ = None
if _re_tf_models.match(lowercase ) is not None:
lowerCamelCase_ = tf_models
lowerCamelCase_ = _re_tf_models.match(lowercase ).groups()[0]
elif _re_flax_models.match(lowercase ) is not None:
lowerCamelCase_ = flax_models
lowerCamelCase_ = _re_flax_models.match(lowercase ).groups()[0]
elif _re_pt_models.match(lowercase ) is not None:
lowerCamelCase_ = pt_models
lowerCamelCase_ = _re_pt_models.match(lowercase ).groups()[0]
if lookup_dict is not None:
while len(lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCamelCase_ = True
break
# Try again after removing the last word in the name
lowerCamelCase_ = ''.join(camel_case_split(lowercase )[:-1] )
lowerCamelCase_ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCamelCase_ = list(lowercase )
all_models.sort()
lowerCamelCase_ = {'model_type': all_models}
lowerCamelCase_ = [pt_models[t] for t in all_models]
lowerCamelCase_ = [tf_models[t] for t in all_models]
lowerCamelCase_ = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
lowerCamelCase_ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCamelCase_ = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCamelCase_ = 'AutoTokenizer'
lowerCamelCase_ = [processors[t] for t in all_models]
return pd.DataFrame(lowercase )
def update_pipeline_and_auto_class_table( table ):
'''simple docstring'''
lowerCamelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCamelCase_ = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
lowerCamelCase_ = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase , lowercase , lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase , lowercase ):
continue
# First extract all model_names
lowerCamelCase_ = []
for name in getattr(lowercase , lowercase ).values():
if isinstance(lowercase , lowercase ):
model_names.append(lowercase )
else:
model_names.extend(list(lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata( token , commit_sha ):
'''simple docstring'''
lowerCamelCase_ = get_frameworks_table()
lowerCamelCase_ = Dataset.from_pandas(lowercase )
lowerCamelCase_ = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowercase )
lowerCamelCase_ = Dataset.from_json(lowercase )
lowerCamelCase_ = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowercase ) )
}
lowerCamelCase_ = update_pipeline_and_auto_class_table(lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCamelCase_ = sorted(table.keys() )
lowerCamelCase_ = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
lowerCamelCase_ = Dataset.from_pandas(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowercase , 'pipeline_tags.json' ) )
if commit_sha is not None:
lowerCamelCase_ = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
lowerCamelCase_ = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowercase , repo_type='dataset' , token=lowercase , commit_message=lowercase , )
def check_pipeline_tags( ):
'''simple docstring'''
lowerCamelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCamelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCamelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCamelCase_ = pipeline_tasks[key]['pt']
if isinstance(lowercase , (list, tuple) ):
lowerCamelCase_ = model[0]
lowerCamelCase_ = model.__name__
if model not in in_table.values():
missing.append(lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = ', '.join(lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowerCamelCase : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
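# Invocation sketch (added; the flags are exactly the ones defined by the
# argparse block above):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only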
| 208 | 0 |
import math
def perfect_square( num: int ) -> bool:
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n: int ) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
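# Quick checks (added): both helpers above agree that 9 is a square and 10 is
# not, but the sqrt-based version trusts float rounding, so for very large
# integers the binary-search variant is the safe one -- it never leaves
# integer arithmetic.
#
#   assert perfect_square(9) and perfect_square_binary_search(9)
#   assert not perfect_square(10) and not perfect_square_binary_search(10)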
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
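# Note (added): the _LazyModule indirection means importing this package is
# cheap; the torch- or vision-backed submodules listed above are only
# materialized on first attribute access, e.g. when ChineseCLIPModel is
# actually referenced.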
| 18 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase : int = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase : List[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_UpperCAmelCase : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_UpperCAmelCase : str = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_UpperCAmelCase : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_UpperCAmelCase : Any = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_UpperCAmelCase : List[Any] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
_a = VOCAB_FILES_NAMES
_a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
_a = VOCAB_FILES_NAMES
_a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_a = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Tuple = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_UpperCAmelCase : Dict = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin :
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
lowercase :Tuple = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowercase :Optional[Any] = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
lowercase :List[Any] = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
lowercase :str = len(lowercase_ )
lowercase :Union[str, Any] = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
F"There should be as many titles than texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts." )
lowercase :Optional[int] = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["input_ids"]
lowercase :Optional[Any] = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["input_ids"]
lowercase :List[str] = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
lowercase :Union[str, Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase :str = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
lowercase :Dict = len(lowercase_ )
lowercase :List[str] = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowercase :Union[str, Any] = []
for doc_id in sorted_docs:
lowercase :Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase :Optional[int] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase :Optional[Any] = sequence_ids.index(self.pad_token_id )
else:
lowercase :Tuple = len(lowercase_ )
lowercase :Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
lowercase :Tuple = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase :Optional[Any] = sorted(lowercase_ , key=lambda _lowerCAmelCase : x[1] , reverse=lowercase_ )
lowercase :List[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]" )
lowercase :Optional[int] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
_a = VOCAB_FILES_NAMES
_a = READER_PRETRAINED_VOCAB_FILES_MAP
_a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = READER_PRETRAINED_INIT_CONFIGURATION
_a = ['''input_ids''', '''attention_mask''']
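# Call-shape sketch (added; the checkpoint is the public DPR reader):
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles="Haddaway", texts="'What Is Love' is a 1993 song...")
#   # encoded["input_ids"] has shape (n_passages, sequence_length), following the
#   # [CLS] question [SEP] title [SEP] text layout described in the docstring above.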
| 364 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module ):
    if is_torch_version("<", "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fpaa_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , "_converted_to_transformer_engine" , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone( ):
PartialState().wait_for_everyone()
def save( obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj ):
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port = None ):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
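# Worked example (added) for the recursive dict merge above:
#   merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}}) returns {"a": {"y": 2, "x": 1}},
#   mutating the destination's nested dict in place instead of overwriting it.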
| 158 | 0 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def get_parent_position( position: int ) -> int:
    return (position - 1) // 2
def get_child_left_position( position: int ) -> int:
    return (2 * position) + 1
def get_child_right_position( position: int ) -> int:
    return (2 * position) + 2
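# Index-arithmetic check (added): for position 3 the parent is (3 - 1) // 2 == 1
# and the children are 2*3 + 1 == 7 and 2*3 + 2 == 8 -- the standard layout of a
# binary heap flattened into an array.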
class MinPriorityQueue(Generic[T] ):
    """simple docstring"""
    def __init__( self ):
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__( self ):
        return self.elements
    def __repr__( self ):
        return str(self.heap )
    def is_empty( self ):
        # Check if the priority queue is empty
        return self.elements == 0
    def push( self , elem , weight ):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self ):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self , elem , weight ):
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self , elem ):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self , elem ):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self , nodea_pos , nodeb_pos ):
        # Swap the nodes at the given positions
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos
class GraphUndirectedWeighted(Generic[T] ):
    """simple docstring"""
    def __init__( self ):
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__( self ):
        return str(self.connections )
    def __len__( self ):
        return self.nodes
    def add_node( self , node ):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self , nodea , nodeb , weight ):
        # Add an edge between 2 nodes in the graph
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
def prims_algo( graph , ) -> tuple[dict, dict]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
| 177 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs) -> Dict:
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts , tgt_texts , **kwargs)
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self , src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def set_tgt_lang_special_tokens( self , lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
return (out_vocab_file,)
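# Typical round-trip (added; the checkpoint is the public facebook/mbart-large-en-ro):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids end with [eos, en_XX]: set_src_lang_special_tokens above puts the
#   # language code in the suffix, not the prefix.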
| 11 | 0 |
'''simple docstring'''
import os
def solution() -> int:
    """simple docstring"""
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
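# Range bounds (added note): 17 == 20 - 3 keeps every four-cell window inside
# the 20x20 grid, and the second diagonal starts at column 3 so that j - 3
# never goes negative.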
if __name__ == "__main__":
print(solution())
| 354 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum( nums: Sequence[int] | None = None ) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
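# Worked example (added): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the running
# maximum settles on the subarray [4, -1, 2, 1], so
# max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6.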
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_UpperCamelCase = int(input('''Enter number of elements : ''').strip())
_UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 16 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__a = StableDiffusionXLImgaImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__a = PipelineTesterMixin.required_optional_params - {"""latents"""}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_snake_case = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
_snake_case = CLIPTextModel(_lowerCamelCase )
_snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_lowerCamelCase )
_snake_case = CLIPTextModelWithProjection(_lowerCamelCase )
_snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_lowerCamelCase )
_snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def lowercase ( self : Optional[int] ):
_snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionXLImgaImgPipeline(**_lowerCamelCase )
_snake_case = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
_snake_case = self.get_dummy_inputs(_lowerCamelCase )
_snake_case = sd_pipe(**_lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : str ):
pass
def lowercase ( self : List[str] ):
_snake_case = self.get_dummy_components()
_snake_case = StableDiffusionXLImgaImgPipeline(**_lowerCamelCase )
_snake_case = sd_pipe.to(_lowerCamelCase )
_snake_case = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
# forward without prompt embeds
_snake_case = self.get_dummy_inputs(_lowerCamelCase )
_snake_case = 3 * ['''this is a negative prompt''']
_snake_case = negative_prompt
_snake_case = 3 * [inputs['''prompt''']]
_snake_case = sd_pipe(**_lowerCamelCase )
_snake_case = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_snake_case = self.get_dummy_inputs(_lowerCamelCase )
_snake_case = 3 * ['''this is a negative prompt''']
_snake_case = 3 * [inputs.pop('''prompt''' )]
        _snake_case , _snake_case , _snake_case , _snake_case = sd_pipe.encode_prompt(_lowerCamelCase , negative_prompt=_lowerCamelCase )
_snake_case = sd_pipe(
**_lowerCamelCase , prompt_embeds=_lowerCamelCase , negative_prompt_embeds=_lowerCamelCase , pooled_prompt_embeds=_lowerCamelCase , negative_pooled_prompt_embeds=_lowerCamelCase , )
_snake_case = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.floataa , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase ( self : List[Any] ):
_snake_case = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
_snake_case = self.get_inputs(_lowerCamelCase )
_snake_case = pipe(**_lowerCamelCase ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_snake_case = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 288 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def lowercase ( self : Tuple ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = TFResNetModel(config=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ):
_snake_case = self.num_labels
_snake_case = TFResNetForImageClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__a = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : List[Any] ):
_snake_case = TFResNetModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : List[Any] ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def lowercase ( self : Any ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def lowercase ( self : List[str] ):
pass
def lowercase ( self : int ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = model_class(_lowerCamelCase )
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case = layer_type
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : List[str] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFResNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' )
# forward pass
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
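# Illustrative end-to-end inference mirroring the integration test above. The
# checkpoint name is an assumption (any entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
# works); the calls themselves are the public transformers API:
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=prepare_img(), return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_class = model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]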
| 288 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int] ):
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: int = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Tuple ):
if len(snake_case_ ) == 0 or len(snake_case_ ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(snake_case_ ) )
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: Any = [sequences]
UpperCamelCase_: List[str] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(snake_case_ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_A )
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : Tuple , snake_case_ : Optional[int]=ZeroShotClassificationArgumentHandler() , *snake_case_ : Union[str, Any] , **snake_case_ : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = args_parser
super().__init__(*snake_case_ , **snake_case_ )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def lowerCAmelCase__ ( self : str ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Any , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : Tuple=TruncationStrategy.ONLY_FIRST , **snake_case_ : str ):
UpperCamelCase_: Optional[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCamelCase_: List[Any] = self.tokenizer.eos_token
try:
UpperCamelCase_: Optional[int] = self.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , truncation=snake_case_ , )
except Exception as e:
if "too short" in str(snake_case_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCamelCase_: List[str] = self.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **snake_case_ : Optional[int] ):
if kwargs.get("""multi_class""" , snake_case_ ) is not None:
UpperCamelCase_: Dict = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCamelCase_: Any = {}
if "candidate_labels" in kwargs:
UpperCamelCase_: Any = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCamelCase_: Optional[int] = kwargs["""hypothesis_template"""]
UpperCamelCase_: Any = {}
if "multi_label" in kwargs:
UpperCamelCase_: str = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , snake_case_ : Union[str, List[str]] , *snake_case_ : Optional[Any] , **snake_case_ : int , ):
if len(snake_case_ ) == 0:
pass
elif len(snake_case_ ) == 1 and "candidate_labels" not in kwargs:
UpperCamelCase_: Optional[int] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[Any] , snake_case_ : Tuple=None , snake_case_ : Dict="This example is {}." ):
UpperCamelCase_: List[str] = self._args_parser(snake_case_ , snake_case_ , snake_case_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(snake_case_ , snake_case_ ) ):
UpperCamelCase_: List[str] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(snake_case_ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: int = inputs["""candidate_label"""]
UpperCamelCase_: Union[str, Any] = inputs["""sequence"""]
UpperCamelCase_: Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCamelCase_: Optional[Any] = self.model(**snake_case_ )
UpperCamelCase_: List[Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Dict , snake_case_ : str , snake_case_ : List[str]=False ):
UpperCamelCase_: Optional[int] = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCamelCase_: str = [outputs["""sequence"""] for outputs in model_outputs]
UpperCamelCase_: int = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCamelCase_: Optional[Any] = logits.shape[0]
UpperCamelCase_: Any = len(snake_case_ )
UpperCamelCase_: str = N // n
UpperCamelCase_: Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(snake_case_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCamelCase_: Optional[Any] = self.entailment_id
UpperCamelCase_: List[str] = -1 if entailment_id == 0 else 0
UpperCamelCase_: str = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCamelCase_: List[str] = np.exp(snake_case_ ) / np.exp(snake_case_ ).sum(-1 , keepdims=snake_case_ )
UpperCamelCase_: Optional[int] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCamelCase_: List[str] = reshaped_outputs[..., self.entailment_id]
UpperCamelCase_: Optional[int] = np.exp(snake_case_ ) / np.exp(snake_case_ ).sum(-1 , keepdims=snake_case_ )
UpperCamelCase_: List[str] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
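# Illustrative use of the pipeline implemented above (the checkpoint is an
# assumption; any NLI model whose config maps a label starting with "entail"
# works, see the entailment_id property above):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "I have a problem with my iphone that needs to be resolved asap!",
#         candidate_labels=["urgent", "not urgent", "phone", "computer"],
#     )
#     # -> {"sequence": ..., "labels": [...], "scores": [...]}, sorted by score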
| 368 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
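# For reference, a simplified sketch of the context manager exercised above
# (illustrative only; the real implementation lives in datasets.utils.patching
# and also covers renamed imports and builtins, as the tests check):
#
#     class patch_submodule:
#         def __init__(self, obj, target, new):
#             *self.module_path, self.attr = target.split(".")
#             self.obj, self.new = obj, new
#
#         def __enter__(self):
#             # wrap each intermediate module in a _PatchedModuleObj and point
#             # the final attribute at `new`, so that every access route
#             # (os.path.join, path.join, join) resolves to the mock
#             ...
#
#         def __exit__(self, *exc_info):
#             ...  # restore the original attributes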
| 223 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( a__ , a__ , a__ , unittest.TestCase ):
lowercase__ = StableDiffusionInpaintPipeline
lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase__ = frozenset(
[] )  # TODO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowerCamelCase__ ,)
_UpperCamelCase : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
_UpperCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
_UpperCamelCase : Any = CLIPTextModel(lowerCamelCase__ )
_UpperCamelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[int]=0 ):
'''simple docstring'''
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
_UpperCamelCase : List[str] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCamelCase : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_UpperCamelCase : int = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((64, 64) )
_UpperCamelCase : Union[str, Any] = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(lowerCamelCase__ ).startswith('mps' ):
_UpperCamelCase : Optional[Any] = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCamelCase : Dict = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCamelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Union[str, Any] = self.get_dummy_components()
_UpperCamelCase : int = StableDiffusionInpaintPipeline(**lowerCamelCase__ )
_UpperCamelCase : Any = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCamelCase : Any = sd_pipe(**lowerCamelCase__ ).images
_UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_UpperCamelCase : Tuple = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCamelCase : List[str] = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
_UpperCamelCase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCamelCase : Tuple = torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='np' ,)
_UpperCamelCase : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCamelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_UpperCamelCase : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCamelCase : Any = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ ,torch_dtype=torch.floataa ,safety_checker=lowerCamelCase__ ,)
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
_UpperCamelCase : Any = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCamelCase : Optional[int] = torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='np' ,)
_UpperCamelCase : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_UpperCamelCase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_UpperCamelCase : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
_UpperCamelCase : Optional[int] = PNDMScheduler.from_pretrained(lowerCamelCase__ ,subfolder='scheduler' )
_UpperCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,torch_dtype=torch.floataa ,)
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase : Optional[int] = 'Face of a yellow cat, high resolution, sitting on a park bench'
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCamelCase : Any = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type='np' ,)
_UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
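# Illustrative end-user call of the pipeline exercised above, with the same
# checkpoint the slow tests use (image/mask loading elided):
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#     ).to("cuda")
#     result = pipe(
#         prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#         image=init_image,
#         mask_image=mask_image,
#     ).images[0]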
| 83 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_ ( a__ ):
def __init__( self , a , a , a = None , a = None , a = False , **a , ):
super().__init__(features=a , cache_dir=a , keep_in_memory=a , **a )
UpperCamelCase__ = Sql(
cache_dir=a , features=a , sql=a , con=a , **a , )
def __a ( self ):
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , )
# Build dataset for splits
UpperCamelCase__ = self.builder.as_dataset(
split="train" , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
def __init__( self , a , a , a , a = None , a = None , **a , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCamelCase__ = dataset
UpperCamelCase__ = name
UpperCamelCase__ = con
UpperCamelCase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase__ = num_proc
UpperCamelCase__ = to_sql_kwargs
def __a ( self ):
UpperCamelCase__ = self.to_sql_kwargs.pop("sql" , a )
UpperCamelCase__ = self.to_sql_kwargs.pop("con" , a )
UpperCamelCase__ = self.to_sql_kwargs.pop("index" , a )
UpperCamelCase__ = self._write(index=a , **self.to_sql_kwargs )
return written
def __a ( self , a ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = args
UpperCamelCase__ = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
UpperCamelCase__ = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase__ = batch.to_pandas()
UpperCamelCase__ = df.to_sql(self.name , self.con , index=a , **a )
return num_rows or len(a )
def __a ( self , a , **a ):
UpperCamelCase__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCamelCase__ , UpperCamelCase__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
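# Illustrative round trip through the public datasets API backed by the reader
# and writer classes above (the sqlite database path and table name are
# assumptions):
#
#     import sqlite3
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     con = sqlite3.connect("data.db")
#     ds.to_sql("my_table", con)               # dispatches to the writer above
#     ds2 = Dataset.from_sql("my_table", con)  # dispatches to the reader above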
| 80 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 361 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well, which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 244 | 0 |
def gcd(a: int, b: int) -> int:
    # iterative Euclidean algorithm
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm, tracking the Bezout coefficient of a
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
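# Quick sanity check (illustrative values, not from the original file): the
# returned inverse must satisfy (a * inverse) % m == 1.
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    inverse = find_mod_inverse(7, 26)  # 7 * 15 = 105 = 4 * 26 + 1
    assert inverse == 15
    assert (7 * inverse) % 26 == 1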
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    '''Configuration for ViT MAE; defaults match facebook/vit-mae-base.'''

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16,
        num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512,
        decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75,
        norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
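# Illustrative usage (not part of the original file): individual fields can be
# overridden at construction time, the rest keep the vit-mae-base defaults.
#
#     config = ViTMAEConfig(mask_ratio=0.5, norm_pix_loss=True)
#     assert config.hidden_size == 768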
| 11 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.12'  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A :
lowercase_ = None
lowercase_ = ()
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_a = 2
_a = inputs['''input_ids'''].shape[-1] // 2
_a = inputs['''input_ids'''][:max_batch_size, :sequence_length]
_a = jnp.ones_like(lowerCAmelCase_ )
_a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 0
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_a = pt_model_class(lowerCAmelCase_ ).eval()
_a = load_flax_weights_in_pytorch_model(lowerCAmelCase_ , flax_model.params )
_a = flax_model.generate(lowerCAmelCase_ ).sequences
_a = pt_model.generate(torch.tensor(lowerCAmelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
_a = 0.8
_a = 10
_a = 0.3
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 2
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = 2
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = model.generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase_ )
_a = jit(model.generate )
_a = jit_generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
_a = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
_a = '''Hello world'''
_a = tokenizer(lowerCAmelCase_ , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase_ , '''do_samples''' ):
model.generate(lowerCAmelCase_ , do_samples=lowerCAmelCase_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase_ , '''foo''' ):
_a = {'''foo''': '''bar'''}
model.generate(lowerCAmelCase_ , **lowerCAmelCase_ )
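# Illustrative greedy generation with the same tiny checkpoint the regression
# test above uses (mirrors the calls the test exercises):
#
#     tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
#     model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#     input_ids = tokenizer("Hello world", return_tensors="np").input_ids
#     sequences = model.generate(input_ids, do_sample=False).sequences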
| 179 |
"""Closest pair of points via divide and conquer.

The brute-force approach is O(n^2); the divide-and-conquer approach below sorts
the points once and recursively combines half-solutions through a vertical strip.
"""


def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force: compare every pair among the first points_counts points
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the strip each point only needs to be checked against a constant
    # number of neighbours (at most 6) ordered by the y coordinate
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line form the strip
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
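# Optional brute-force cross-check (illustrative, not in the original file):
# the divide-and-conquer answer should match a plain O(n^2) scan over all pairs.
#
#     assert abs(
#         dis_between_closest_pair(points, len(points)) ** 0.5
#         - closest_pair_of_points(points, len(points))
#     ) < 1e-9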
| 179 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
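# Usage note (illustrative): with the _LazyModule indirection above, importing
# the package stays cheap; the heavy torch-backed module is only imported on
# first attribute access:
#
#     import transformers
#     model_cls = transformers.MvpModel  # modeling_mvp is imported here, lazily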
| 100 |
"""simple docstring"""
__magic_name__ = "Tobias Carryer"
from time import time
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=int(time())): # noqa: B008
__SCREAMING_SNAKE_CASE = multiplier
__SCREAMING_SNAKE_CASE = increment
__SCREAMING_SNAKE_CASE = modulo
__SCREAMING_SNAKE_CASE = seed
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__magic_name__ = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
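# Worked example (sketch, assuming the class above): with seed=0 the first call
# to next_number() returns the increment itself, since
# (1664525 * 0 + 1013904223) % (2 << 31) == 1013904223.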
| 100 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device))
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """For each possible total, count how many ways `dice_number` dice with
    `sides_number` faces can roll it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
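# Sanity note (hedged): Peter's totals span 9..36 and Colin's 6..36; the loop
# counts ordered outcome pairs where Peter's total strictly exceeds Colin's out
# of (4**9) * (6**6) equally likely games, which should reproduce the published
# Project Euler 205 result of 0.5731441.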
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 93 | 0 |
"""Möbius function, built on a square-free check and prime factorization."""

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return 0 if `number` has a squared prime factor, otherwise
    (-1) ** (number of prime factors)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
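# Worked examples (assuming the imported helpers): mobius(7) == -1 (one prime
# factor), mobius(15) == 1 (two factors: 3 * 5), mobius(4) == 0 (2 * 2 is not
# square-free).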
| 358 |
"""Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""


def lucas_lehmer_test(p: int) -> bool:
    """For an odd prime exponent p, return True iff 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
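# Trace for p = 5: m = 31 and s evolves 4 -> 14 -> 8 -> 0 over p - 2 = 3 steps,
# so lucas_lehmer_test(5) is True and 2**5 - 1 = 31 is indeed prime.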
| 136 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
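    # Worked example (hedged): with max_length=512 and a concatenated stream of
    # 1300 tokens, total_length becomes (1300 // 512) * 512 == 1024, so
    # group_texts yields two 512-token chunks and drops the trailing 276 tokens.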
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
main(args)
| 63 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by exchanging out-of-order pairs, O(n**2)."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
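# e.g. exchange_sort([3, 1, 2]) returns [1, 2, 3] (the input list is also
# mutated in place).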
| 133 | 0 |
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a checkpoint state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute the fused QKV weights/biases saved by Megatron-LM into the
    layout transformers' GPT-2 expects."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
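# Shape walk-through (hedged): for checkpoint_version >= 2.0 a fused QKV weight
# of shape [num_heads * 3 * head_dim, hidden] is viewed as
# (num_heads, 3, head_dim, hidden), transposed to (3, num_heads, head_dim, hidden)
# so the three projections become contiguous, then flattened back to the
# original shape for the GPT-2 style c_attn parameter.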
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Rename and reshape a Megatron-LM GPT-2 checkpoint into transformers format."""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
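# Example invocation (hedged; the script filename is illustrative, the flags are
# the ones defined in main() above):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip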
| 350 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10)
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"))
        unet = UNet2DModel(sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"))
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0])

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 51 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the latent height/width up so the decoded image covers the request."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
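# Worked example: downscale_height_and_width(768, 768, scale_factor=8) returns
# (96, 96), since 768 // 8**2 == 12 exactly and 12 * 8 == 96; a non-multiple
# such as height=500 rounds up: 500 // 64 == 7 with a remainder, so (7 + 1) * 8 == 64.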
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with Kandinsky 2.2 and ControlNet hints."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 315 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
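# Trace for gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6.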
def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main() | 189 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Convert `InputExample`s into model-ready `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Any = tokenizer.num_special_tokens_to_add()
if len(_a ) > max_seq_length - special_tokens_count:
_a : Tuple = tokens[: (max_seq_length - special_tokens_count)]
_a : Optional[int] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(self, token_classification_task, data_dir, tokenizer, labels, model_type, max_seq_length=None, overwrite_cache=False, mode=Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
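# A minimal end-to-end sketch (hypothetical `ToyTask` subclass and toy data,
# not part of the module above; assumes `transformers` is installed):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    class ToyTask(TokenClassificationTask):
        @staticmethod
        def read_examples_from_file(data_dir, mode):
            return [InputExample(guid="toy-1", words=["Hugging", "Face", "HQ"], labels=["B-ORG", "I-ORG", "O"])]

        @staticmethod
        def get_labels(path):
            return ["O", "B-ORG", "I-ORG"]

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    features = ToyTask.convert_examples_to_features(
        ToyTask.read_examples_from_file(".", Split.train),
        ToyTask.get_labels(None),
        max_seq_length=16,
        tokenizer=tokenizer,
        cls_token=tokenizer.cls_token,
        sep_token=tokenizer.sep_token,
        pad_token=tokenizer.pad_token_id,
    )
    print(len(features[0].input_ids))  # 16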
| 365 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
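# A minimal sketch of the validation flow these tests exercise (same ReadMe
# API; README_CORRECT and example_yaml_structure are module-level constants):
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # raises ValueError when structure or YAML markers are wrong
    print(readme.to_dict()["subsections"][0]["name"])  # Dataset Card for My Dataset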
| 15 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
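# To run these slow, GPU-only tests locally (the path is a guess based on
# where this module usually lives in a transformers checkout; adjust as needed):
#
#   RUN_SLOW=1 pytest examples/research_projects/rag/_test_finetune_rag.py -s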
| 280 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):  # vector-scalar multiplication
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):  # dot product
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple w x h matrix stored as a list of row lists."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector multiplication
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar multiplication
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    matrix: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(matrix, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
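# A short usage sketch of the classes above (arbitrary values):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([3, 2, 1])
    print(v + w)            # (4,4,4)
    print(v * w)            # 10  (dot product)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # -2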
| 280 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
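# Behavior sketch (assuming this file is transformers/models/swinv2/__init__.py):
# importing the package is cheap, and the torch-backed classes are only
# materialized on first attribute access, e.g.
#
#   from transformers.models.swinv2 import Swinv2Config  # no torch import yet
#   from transformers.models.swinv2 import Swinv2Model   # triggers the real import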
| 176 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursive DFS over the include/exclude decision for each element."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: skip sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
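# Cross-check: the same power set via the standard library (output order
# differs from the DFS above):
from itertools import combinations


def power_set(seq: list[Any]) -> list[list[Any]]:
    return [list(c) for r in range(len(seq) + 1) for c in combinations(seq, r)]


assert len(power_set(["A", "B", "C"])) == 2**3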
| 294 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k is an empirically determined constant in [0.04, 0.06]; window_size is the neighbourhood considered."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """Return the image with detected corners highlighted, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: hardcoded here; self.k is not used in the response formula
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
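# A quick smoke test on a synthetic checkerboard (uses the cv2/numpy imports
# above; corners between the 8x8 squares should yield strong responses):
if __name__ == "__main__":
    board = 255 * np.kron([[0, 1] * 4, [1, 0] * 4] * 4, np.ones((8, 8))).astype(np.uint8)
    cv2.imwrite("board.png", board)
    detector = HarrisCorner(0.04, 3)
    _, corners = detector.detect("board.png")
    print(f"found {len(corners)} corner candidates")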
| 294 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 353 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
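# Each endpoint returns a list with one quote object. Per the zenquotes.io
# response format (field names assumed: "q" = quote text, "a" = author), a
# small formatting helper might look like:
def format_quote(payload: list) -> str:
    quote = payload[0]
    return f'"{quote["q"]}" -- {quote["a"]}'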
| 336 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase__ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 241 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 295 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
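# An equivalent vectorized version for comparison (NumPy broadcasting replaces
# the per-pixel loops above and is much faster on large images):
import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img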
| 295 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
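# Typical invocation (all paths are placeholders; the script filename is
# whatever this file is saved as):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned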
| 39 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from the beginning (positive n) or the end (negative n) of a dotted path."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Update resnet parameter names to the new diffusers naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Update attention parameter names to the new diffusers naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
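# A minimal, self-contained sketch (shapes invented, not from the original
# script) of the fused-qkv split performed in `assign_to_checkpoint` above:
# a (3 * channels, ...) tensor is grouped per attention head and split into
# query / key / value.
def _demo_qkv_split():
    num_heads, channels = 2, 8
    fused = torch.randn(3 * channels, 16)
    per_head = fused.reshape((num_heads, 3 * channels // num_heads) + fused.shape[1:])
    query, key, value = per_head.split(channels // num_heads, dim=1)
    assert query.shape == (num_heads, channels // num_heads, 16)
    return query, key, value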
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f"""input_blocks.{i}.1""",
                'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ['conv.weight', 'conv.bias'] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f"""output_blocks.{i}.1""",
                    'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                to_split = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None, config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])
                new_checkpoint[new_path] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
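# Example invocation (script name and paths are placeholders, not from the
# original file):
#   python convert_ldm_checkpoint.py --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json --dump_path ./converted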
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False (two-pointer scan)."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False (half-length traversal)."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
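# Quick sanity check for the traversal variant, in the spirit of the
# module-level assertion on `test_data` above:
assert is_palindrome_traversal("rotor") and not is_palindrome_traversal("String")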
def is_palindrome_recursive(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False (recursive)."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """Return True if s is a palindrome, otherwise False (slicing)."""
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    """Time how long the named predicate takes over the whole test_data set."""
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
    benchmark_function('is_palindrome_traversal')
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "time_series_transformer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll",
                 input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0,
                 num_static_categorical_features=0, num_static_real_features=0, num_time_features=0,
                 cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32,
                 encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2,
                 is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1,
                 encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1,
                 num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs):
# time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
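# A minimal usage sketch (values invented here, not from the original file):
# `feature_size` is derived from `input_size * len(lags_sequence)` plus the
# static/dynamic feature count computed by `_number_of_features` above.
def _demo_time_series_config():
    config = TimeSeriesTransformerConfig(prediction_length=24)
    return config.feature_size, config.context_length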
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )

    @classmethod
    def pip_install(cls):
        return F"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                F"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""" )
        return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
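# A minimal sketch (not from the original module) of how these backends are
# typically consumed: pick the default backend name, look up its class, and
# verify the library is installed before running a search.
def _demo_pick_backend():
    name = default_hp_search_backend()
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
    backend.ensure_available()
    return backend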
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        return len(self.event_shape)

    @property
    def value_in_support(self):
        return 0.0

    def get_parameter_projection(self, in_features: int):
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Maps inputs to the positive orthant: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
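# `squareplus` maps any real number to a strictly positive one, similar in
# spirit to softplus. A quick check (tensor values invented for illustration):
def _demo_squareplus():
    x = torch.tensor([-3.0, 0.0, 3.0])
    y = DistributionOutput.squareplus(x)
    assert (y > 0).all()
    return y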
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
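# A minimal sketch (values invented) of the scaling trick above: adding
# log(scale) to the logits multiplies the NegativeBinomial mean by `scale`,
# since torch's parametrization gives mean = total_count * exp(logits).
def _demo_nb_scaling():
    output = NegativeBinomialOutput(dim=1)
    total_count = torch.tensor([5.0])
    logits = torch.tensor([0.0])
    base = output.distribution((total_count, logits))
    # clone the logits because `distribution` modifies them in place
    scaled = output.distribution((total_count, logits.clone()), scale=torch.tensor([2.0]))
    assert torch.allclose(scaled.mean, 2.0 * base.mean)
    return scaled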
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
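# A minimal usage sketch (model name is a placeholder, not from this file):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Transformers is great!")  # nested list of floats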
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim: Optional[int] = None, scaling_factor=0.18215, norm_type="group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
@apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
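# A minimal round-trip sketch (default config, random input; shapes invented):
def _demo_vqmodel_roundtrip():
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        reconstruction = model(sample).sample
    assert reconstruction.shape == sample.shape
    return reconstruction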
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
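# A small illustration (key names invented) of the "*" wildcard substitution
# used above: the fairseq layer index is spliced into the HF key template.
def _demo_wildcard_mapping():
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)  # "encoder.layers.3.attention.k_proj"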
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
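# Quick check of the helpers (the Project Euler solution below builds on them):
assert greatest_common_divisor(12, 18) == 6 and lcm(4, 6) == 12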
def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        '''simple docstring'''
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        '''simple docstring'''
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
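
    # Note on the batch layout produced above under classifier-free guidance:
    # torch.cat([negative_prompt_embeds, prompt_embeds]) puts the unconditional
    # half first, which is why __call__ unpacks model_output.chunk(2) as
    # (model_output_uncond, model_output_text) in that order.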
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f' {self.transformer.num_vector_embeds - 1} (inclusive).')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
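
    # Worked toy example for `truncate` (hedged; the numbers are made up): with
    # class probabilities (0.6, 0.3, 0.1) for a single pixel and
    # truncation_rate=0.8, the sorted cumulative sums are (0.6, 0.9, 1.0). The
    # raw mask keeps only the first class, but shifting it right by one
    # (prepending True, dropping the last entry) also keeps the class that
    # crossed the threshold, so 0.6 and 0.3 survive while 0.1 is sent to
    # log(0) = -inf.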
| 371 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    # avoid a stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect('equal')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
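    # Hedged structural check: each of the 3 initial segments expands into 4
    # vectors per step, plus the shared final endpoint, so one iteration
    # yields 3 * 4 + 1 = 13 vectors.
    assert len(iterate(INITIAL_VECTORS, 1)) == 13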
| 308 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = 'gpt_neox_japanese'

    def __init__(self, vocab_size=3_2000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=1_0000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=3_1996, eos_token_id=3_1999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
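
# Usage sketch (hedged: assumes the defaults above mirror the 2.7b checkpoint
# listed in the archive map):
#     config = GPTNeoXJapaneseConfig()
#     config.model_type     # 'gpt_neox_japanese'
#     config.hidden_size    # 2560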
| 94 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
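
# Hedged illustration of `create_vocab_dict`: for a fairseq dict file whose
# first two lines are "hello 42" and "world 7", the returned mapping is
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.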
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
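
# Example invocation (hedged: the script filename, checkpoint, and dict paths
# below are hypothetical, shown only to illustrate the expected arguments):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2-converted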
| 300 | 0 |
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 63 | from ..utils import DummyObject, requires_backends

# The classes below name `lowerCAmelCase` as their metaclass; alias it to
# DummyObject (the metaclass used by dummy-object modules) so that the name
# resolves.
lowerCAmelCase = DummyObject
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : str) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[int]) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : List[str] , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Dict , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : str) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : str , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Tuple , **lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : str , **lowercase_ : int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Optional[int] , **lowercase_ : Dict) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Any) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Any , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : List[str] , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Dict , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : str , **lowercase_ : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])


def lowerCAmelCase__(*args, **kwargs):
    '''simple docstring'''
    requires_backends(lowerCAmelCase__, ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : List[str] , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : List[str] , **lowercase_ : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Tuple , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : int) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Dict , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Union[str, Any] , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Optional[int]) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Tuple , **lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : List[Any] , **lowercase_ : int) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : int , **lowercase_ : int) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : str , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : int , **lowercase_ : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Any) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : int , **lowercase_ : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Any) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : List[str] , **lowercase_ : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Optional[Any] , **lowercase_ : List[str]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : str , **lowercase_ : int) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : List[str]) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Optional[int] , **lowercase_ : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : int , *lowercase_ : Dict , **lowercase_ : Dict) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : List[str]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Any , **lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : str) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[str] , *lowercase_ : Tuple , **lowercase_ : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : List[Any]) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Dict , **lowercase_ : List[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Dict , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Any , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[Any] , **lowercase_ : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : int , **lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Optional[int] , **lowercase_ : str) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Dict , **lowercase_ : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : List[str]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any]) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : Dict , **lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : str , **lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : str) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *lowercase_ : str , **lowercase_ : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : str) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : str , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Any) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Any , **lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : str , **lowercase_ : Any) -> Any:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Any , **lowercase_ : List[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : str , **lowercase_ : int) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Any , *lowercase_ : List[str] , **lowercase_ : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : str , *lowercase_ : Dict , **lowercase_ : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
class _UpperCAmelCase ( metaclass=lowerCAmelCase ):
'''simple docstring'''
__A = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : int , *lowercase_ : List[str] , **lowercase_ : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["torch"])
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *lowercase_ : Tuple , **lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch"])
| 63 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"transformer.blocks.{i}.norm1.weight", F"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm1.bias", F"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.weight", F"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.bias", F"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.norm2.weight", F"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm2.bias", F"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.mlp.fc1.weight", F"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc1.bias", F"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.weight", F"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.bias", F"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
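# Illustrative note (added for clarity): each pair above is
# (original_checkpoint_key, hf_key); for example
# ("transformer.norm.weight", "vilt.layernorm.weight") moves the final
# layernorm weight under the HF ViltModel naming. rename_key below applies
# each pair to the loaded state dict.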
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
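def _demo_split_fused_qkv(hidden_size=4):
    """Illustrative sketch (a hypothetical helper, not used by the conversion):
    shows how a fused timm-style qkv matrix of shape (3 * hidden, hidden) is
    sliced into query/key/value projections, mirroring read_in_q_k_v above."""
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    # the three slices partition the rows, so concatenating them restores qkv
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)
    return query, key, value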
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
if "vqa" in checkpoint_url:
__snake_case : str = True
__snake_case : Dict = 31_29
__snake_case : int = 'huggingface/label-files'
__snake_case : Tuple = 'vqa2-id2label.json'
__snake_case : Any = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
__snake_case : Tuple = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
__snake_case : Union[str, Any] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
__snake_case : List[str] = ViltForQuestionAnswering(UpperCAmelCase_ )
elif "nlvr" in checkpoint_url:
__snake_case : Optional[int] = True
__snake_case : str = 2
__snake_case : List[str] = {0: 'False', 1: 'True'}
__snake_case : int = {v: k for k, v in config.idalabel.items()}
__snake_case : Tuple = 3
__snake_case : Tuple = ViltForImagesAndTextClassification(UpperCAmelCase_ )
elif "irtr" in checkpoint_url:
__snake_case : Optional[int] = True
__snake_case : Optional[Any] = ViltForImageAndTextRetrieval(UpperCAmelCase_ )
elif "mlm_itm" in checkpoint_url:
__snake_case : Union[str, Any] = True
__snake_case : List[Any] = ViltForMaskedLM(UpperCAmelCase_ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a : List[str]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_a : Any= parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 172 | """simple docstring"""
def solution() -> int:
    '''simple docstring'''
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
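def _sanity_check() -> None:
    """Illustrative check (a sketch, not in the original file): the known
    special Pythagorean triplet summing to 1000 is (200, 375, 425), so
    solution() should return their product."""
    a, b, c = 200, 375, 425
    assert a + b + c == 1000 and a * a + b * b == c * c
    assert solution() == a * b * c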
if __name__ == "__main__":
print(f'''{solution() = }''')
| 172 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjancency[node_a].append([node_b, cost])
        adjancency[node_b].append([node_a, cost])
    result = mst(adjancency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
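# Illustrative note (added for clarity): the expected spanning tree above has
# 8 edges for the 9 nodes, with total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.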
| 49 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # Framework not provided, TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
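# Note (summary of the behaviour exercised above, not part of the original
# file): determine_framework prefers an explicitly provided framework, then
# the framework of a local checkpoint, and only then falls back to what is
# installed in the environment, favouring PyTorch when both backends exist.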
| 49 | 1 |
"""simple docstring"""
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """simple docstring"""
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
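def _demo() -> None:
    """Illustrative sanity check (a sketch, not in the original file): the
    Project Euler 191 statement gives exactly 43 valid prize strings over a
    4-day period."""
    assert solution(4) == 43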
if __name__ == "__main__":
print(solution())
| 64 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
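# A short note on the two approaches: triplet_sum1 enumerates all O(n^3)
# ordered triples via permutations, while triplet_sum2 sorts once and runs a
# two-pointer scan, doing O(n) work per anchor index for O(n^2) overall.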
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 169 | 0 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
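# Why this works (derivation added for clarity): write the decreasing
# arithmetic progression as x = a + d, y = a, z = a - d. Then
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a) = n,
# so for every first_term a, 4*d = a + n / a, which is exactly the
# `common_difference` computed above; the guard d < a < 4*d enforces z > 0
# and n > 0.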
if __name__ == "__main__":
print(F'{solution() = }') | 317 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
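    # Minimal usage sketch (illustrative only, not part of the original file;
    # it assumes the PipelineTool calling convention, where __call__ routes
    # arguments through encode/forward/decode below):
    #
    #   tool = TranslationTool()
    #   tool("How are you?", src_lang="English", tgt_lang="French")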
    def encode(self, text, src_lang, tgt_lang):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True) | 317 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(lowerCAmelCase_):
with open(lowerCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_a : Any = f.readlines()
_a : Tuple = 0
while line_index < len(lowerCAmelCase_ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
_a : int = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_a : Dict = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase_ ):
_a : Dict = _re_one_line_import_struct.search(lowerCAmelCase_ ).groups()[0]
_a : Dict = re.findall(r'\[([^\]]+)\]' , lowerCAmelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_a : str = _re_import_struct_key_value.search(lowerCAmelCase_ )
if single_line_import_search is not None:
_a : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif line.startswith(' ' * 8 + '\"' ):
objects.append(line[9:-3] )
line_index += 1
_a : Union[str, Any] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_a : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_a : List[str] = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase_ ) is not None:
_a : Optional[int] = _re_import_struct_add_many.search(lowerCAmelCase_ ).groups()[0].split(', ' )
_a : Tuple = [obj[1:-1] for obj in imports if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif _re_between_brackets.search(lowerCAmelCase_ ) is not None:
_a : List[str] = _re_between_brackets.search(lowerCAmelCase_ ).groups()[0].split(', ' )
_a : int = [obj[1:-1] for obj in imports if len(lowerCAmelCase_ ) > 0]
objects.extend(lowerCAmelCase_ )
elif _re_quote_object.search(lowerCAmelCase_ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase_ ).groups()[0] )
elif line.startswith(' ' * 8 + '\"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '\"' ):
objects.append(line[13:-3] )
line_index += 1
_a : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_a : Tuple = []
while (
line_index < len(lowerCAmelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_a : str = lines[line_index]
_a : List[str] = _re_import.search(lowerCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_a : Tuple = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_a : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_a : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_a : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_a : Dict = lines[line_index]
_a : List[str] = _re_import.search(lowerCAmelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_a : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"""{key} backend"""
            errors.append(f"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"""- {module}""" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"""{list_of_modules}\n"""
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 89 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
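# For example, to_atuple(224) returns (224, 224), while an iterable such as
# (224, 224) is returned unchanged; this is the usual "to 2-tuple" helper for
# image sizes and patch sizes.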
@require_tf
class TFVisionTextDualEncoderMixin:
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
_lowerCAmelCase = after_output[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_snake_case , _snake_case , F'Difference between torch and flax is {diff} (>= {tol}).' )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs()
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
_lowerCAmelCase = model_a(**_snake_case )
_lowerCAmelCase = after_outputs[0].numpy()
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1e-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFViTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case )
_lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
_lowerCAmelCase = model(
input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
_lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase = to_atuple(vision_model.config.image_size )
_lowerCAmelCase = to_atuple(vision_model.config.patch_size )
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFDeiTModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFRobertaModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = 13
_lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowerCAmelCase = random_attention_mask([batch_size, 4] )
_lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFCLIPVisionModel(_snake_case , name="""vision_model""" )
_lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" )
return vision_model, text_model
def snake_case ( self ):
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_snake_case )
_lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_snake_case , padding=_snake_case , return_tensors="""np""" )
_lowerCAmelCase = model(**_snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCAmelCase = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1e-3 ) )
| 82 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowercase__ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
lowercase__ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
lowercase__ = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 280 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
lowercase__ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
lowercase__ = os.path.dirname(os.path.abspath(__file__))
lowercase__ = os.path.join(os.path.expanduser("~"), ".cache")
lowercase__ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False ):
UpperCAmelCase : List[str] = model_type
if use_small:
key += "_small"
return os.path.join(UpperCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
hf_hub_download(repo_id=UpperCAmelCase_ , filename=UpperCAmelCase_ , local_dir=UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_="text" ):
if model_type == "text":
UpperCAmelCase : Dict = BarkSemanticModel
UpperCAmelCase : List[Any] = BarkSemanticConfig
UpperCAmelCase : Optional[int] = BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCAmelCase : List[str] = BarkCoarseModel
UpperCAmelCase : Dict = BarkCoarseConfig
UpperCAmelCase : int = BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCAmelCase : List[Any] = BarkFineModel
UpperCAmelCase : Optional[Any] = BarkFineConfig
UpperCAmelCase : Dict = BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCAmelCase : Optional[int] = F"""{model_type}_small""" if use_small else model_type
UpperCAmelCase : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCAmelCase_ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
UpperCAmelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ )
# this is a hack
UpperCAmelCase : str = checkpoint['model_args']
if "input_vocab_size" not in model_args:
UpperCAmelCase : Union[str, Any] = model_args['vocab_size']
UpperCAmelCase : Union[str, Any] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCAmelCase : Any = model_args.pop('n_head' )
UpperCAmelCase : Optional[Any] = model_args.pop('n_embd' )
UpperCAmelCase : Union[str, Any] = model_args.pop('n_layer' )
UpperCAmelCase : List[Any] = ConfigClass(**checkpoint['model_args'] )
UpperCAmelCase : List[str] = ModelClass(config=UpperCAmelCase_ )
UpperCAmelCase : List[str] = GenerationConfigClass()
UpperCAmelCase : Dict = model_generation_config
UpperCAmelCase : int = checkpoint['model']
# fixup checkpoint
UpperCAmelCase : Tuple = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(UpperCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
UpperCAmelCase : str = k[len(UpperCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
UpperCAmelCase : List[Any] = new_k.replace(UpperCAmelCase_ , new_layer_name_dict[old_layer_name] )
UpperCAmelCase : List[Any] = state_dict.pop(UpperCAmelCase_ )
UpperCAmelCase : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCAmelCase : Optional[int] = {k for k in extra_keys if not k.endswith('.attn.bias' )}
UpperCAmelCase : str = set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCAmelCase : str = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(UpperCAmelCase_ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
UpperCAmelCase : List[str] = model.num_parameters(exclude_embeddings=UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = checkpoint['best_val_loss'].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(UpperCAmelCase_ , 3 )} loss""" )
model.eval()
model.to(UpperCAmelCase_ )
del checkpoint, state_dict
return model
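# A minimal, self-contained sketch of the key-remapping idea used in the
# fixup loop above (the mapping entries here are illustrative; the real table
# is new_layer_name_dict defined near the top of this script):
#
#   rename = {"c_attn": "att_proj", "h.": "layers."}
#   def remap_key(key: str) -> str:
#       for old, new in rename.items():
#           key = key.replace(old, new)
#       return key
#
#   assert remap_key("h.0.c_attn.weight") == "layers.0.att_proj.weight"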
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCAmelCase : List[str] = 'cpu' # do conversion on cpu
UpperCAmelCase : List[str] = _get_ckpt_path(UpperCAmelCase_ , use_small=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = _load_model(UpperCAmelCase_ , UpperCAmelCase_ , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
# load bark initial model
UpperCAmelCase : List[str] = _bark_load_model(UpperCAmelCase_ , 'cpu' , model_type=UpperCAmelCase_ , use_small=UpperCAmelCase_ )
if model_type == "text":
UpperCAmelCase : Tuple = bark_model['model']
if model.num_parameters(exclude_embeddings=UpperCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
UpperCAmelCase : Optional[int] = 5
UpperCAmelCase : Optional[int] = 10
if model_type in ["text", "coarse"]:
UpperCAmelCase : List[Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
UpperCAmelCase : Optional[Any] = bark_model(UpperCAmelCase_ )[0]
UpperCAmelCase : List[str] = model(UpperCAmelCase_ )
# take last logits
UpperCAmelCase : str = output_new_model_total.logits[:, [-1], :]
else:
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : List[Any] = 8
UpperCAmelCase : Dict = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCAmelCase : str = model(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Dict = bark_model(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Any = output_new_model_total.logits
    # any output difference should come only from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : int = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Dict = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Any = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase_ , 'config.json' ) )
UpperCAmelCase : Any = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
UpperCAmelCase : Dict = BarkSemanticModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Tuple = BarkCoarseModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = BarkFineModel.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = EncodecModel.from_pretrained('facebook/encodec_24khz' )
UpperCAmelCase : Optional[Any] = BarkConfig.from_sub_model_configs(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCAmelCase : str = BarkModel(UpperCAmelCase_ )
UpperCAmelCase : int = semantic
UpperCAmelCase : Tuple = coarseAcoustic
UpperCAmelCase : Union[str, Any] = fineAcoustic
UpperCAmelCase : Union[str, Any] = codec
UpperCAmelCase : Optional[int] = bark_generation_config
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
bark.save_pretrained(UpperCAmelCase_ , repo_id=UpperCAmelCase_ , push_to_hub=UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
lowercase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
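# Example invocation (the script filename is hypothetical; the arguments match
# the argparse definition above). This downloads the small "text" suno/bark
# checkpoint, converts it, verifies the converted logits against the original
# within 1e-3, and writes the HF model to the output folder:
#
#   python convert_bark_checkpoint.py text ./bark-text-converted --is_small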
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Dict = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class __A( PretrainedConfig ):
snake_case_ = '''rwkv'''
snake_case_ = {'''max_position_embeddings''': '''context_length'''}
def __init__( self , _snake_case=50_277 , _snake_case=1_024 , _snake_case=4_096 , _snake_case=32 , _snake_case=None , _snake_case=None , _snake_case=1E-5 , _snake_case=0 , _snake_case=0 , _snake_case=6 , _snake_case=False , _snake_case=True , **_snake_case , ) -> List[str]:
'''simple docstring'''
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
            tie_word_embeddings=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
| 6 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
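# Context (a sketch, not part of the test): a DisjunctiveConstraint is meant
# to be passed to constrained beam search so that at least one of the
# candidate token-id sequences appears in the output. Hypothetical usage:
#
#   from transformers import DisjunctiveConstraint
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   # outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)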
| 13 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class snake_case_ ( TestCase ):
def UpperCAmelCase__ ( self : Optional[int] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.int64() )
def UpperCAmelCase__ ( self : str )->Optional[int]:
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase ):
            __lowerCAmelCase : Dict = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
def UpperCAmelCase__ ( self : Tuple )->Optional[Any]:
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase ):
__lowerCAmelCase : str = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def UpperCAmelCase__ ( self : int )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
        self.assertEqual(arr.type , pa.int32() )
def UpperCAmelCase__ ( self : Optional[int] )->int:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__lowerCAmelCase : Dict = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def UpperCAmelCase__ ( self : Optional[int] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
        self.assertEqual(arr.type , pa.int32() )
def UpperCAmelCase__ ( self : Optional[Any] )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def UpperCAmelCase__ ( self : Any )->List[Any]:
'''simple docstring'''
        __lowerCAmelCase : Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , """int64""" ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , """int64""" ) )
def UpperCAmelCase__ ( self : List[Any] )->Tuple:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__lowerCAmelCase : Optional[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def UpperCAmelCase__ ( self : List[Any] )->List[Any]:
'''simple docstring'''
        __lowerCAmelCase : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , """int64""" ) ) )
        self.assertEqual(arr.type , Array2DExtensionType((1, 3) , """int64""" ) )
def UpperCAmelCase__ ( self : Tuple )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def UpperCAmelCase__ ( self : List[str] )->int:
'''simple docstring'''
import PIL.Image
        __lowerCAmelCase : Dict = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=_lowerCAmelCase ) as mock_cast_to_python_objects:
__lowerCAmelCase : int = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
__lowerCAmelCase : List[str] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , _lowerCAmelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int ) -> str:
__lowerCAmelCase : Dict = pa.BufferReader(__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , pa.Buffer ) else pa.memory_map(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = pa.ipc.open_stream(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
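# The helper above round-trips Arrow IPC stream bytes: pa.BufferReader wraps
# the in-memory buffer, pa.ipc.open_stream parses it, and read_all() rebuilds
# the table. A minimal standalone sketch of the same round trip (pyarrow only):
#
#   import pyarrow as pa
#   sink = pa.BufferOutputStream()
#   with pa.ipc.new_stream(sink, pa.schema([("x", pa.int64())])) as w:
#       w.write_table(pa.table({"x": [1, 2]}))
#   table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
#   assert table.to_pydict() == {"x": [1, 2]}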
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[Any] ) -> str:
__lowerCAmelCase : Dict = pa.BufferOutputStream()
__lowerCAmelCase : List[str] = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__lowerCAmelCase : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        __lowerCAmelCase : List[str] = {"""col_1""": pa.string(), """col_2""": pa.int64()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowerCAmelCase : Optional[Any] = pa.BufferOutputStream()
__lowerCAmelCase : Any = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
__lowerCAmelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__lowerCAmelCase : int = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : List[Any] = pa.ipc.open_stream(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase : pa.Table = f.read_all()
__lowerCAmelCase : int = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__SCREAMING_SNAKE_CASE )
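# Why the round trip above preserves the ClassLabel: the label names travel in
# the Arrow schema metadata, while the column itself stores plain integers.
# Small sketch of the ClassLabel mapping (datasets assumed installed):
#
#   from datasets import ClassLabel
#   label = ClassLabel(names=["neg", "pos"])
#   assert label.str2int("pos") == 1 and label.int2str(0) == "neg"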
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
__lowerCAmelCase : Any = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
__lowerCAmelCase : str = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Dict ) -> Dict:
__lowerCAmelCase : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
__lowerCAmelCase : str = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] ) -> Tuple:
__lowerCAmelCase : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
__lowerCAmelCase : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Any ) -> str:
__lowerCAmelCase : Dict = pa.BufferOutputStream()
__lowerCAmelCase : Optional[int] = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
__lowerCAmelCase : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        __lowerCAmelCase : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.int64()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]:
__lowerCAmelCase : int = pa.BufferOutputStream()
__lowerCAmelCase : Any = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
__lowerCAmelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        __lowerCAmelCase : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.int64()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :str ) -> str:
__lowerCAmelCase : str = pa.BufferOutputStream()
__lowerCAmelCase : int = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
__lowerCAmelCase : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
        __lowerCAmelCase : Any = {"""col_1""": pa.string(), """col_2""": pa.int64()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _SCREAMING_SNAKE_CASE ( ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
        __lowerCAmelCase : Union[str, Any] = {"""col_1""": pa.string(), """col_2""": pa.int64()}
__lowerCAmelCase : str = os.path.join(__SCREAMING_SNAKE_CASE , """test.arrow""" )
with ArrowWriter(path=__SCREAMING_SNAKE_CASE , schema=pa.schema(__SCREAMING_SNAKE_CASE ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
__lowerCAmelCase : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(__SCREAMING_SNAKE_CASE , 1 )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]:
if pa.types.is_list(__SCREAMING_SNAKE_CASE ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int ) -> Dict:
if isinstance(lst[0] , __SCREAMING_SNAKE_CASE ):
change_first_primitive_element_in_list(lst[0] , __SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : Tuple = value
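# Sketch of what the two helpers above do (the names follow the call sites
# below; pyarrow assumed installed):
#
#   import pyarrow as pa
#   nested = pa.list_(pa.list_(pa.int64()))
#   # get_base_dtype(nested) walks .value_type down and returns pa.int64()
#
#   lst = [[1, 2, 3]]
#   # change_first_primitive_element_in_list(lst, 99) recurses into the first
#   # element until it hits a scalar, mutating lst in place to [[99, 2, 3]]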
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any ) -> Tuple:
__lowerCAmelCase : Union[str, Any] = pa.array(TypedSequence(__SCREAMING_SNAKE_CASE , optimized_int_type=__SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> str:
__lowerCAmelCase : Optional[int] = pa.array(OptimizedTypedSequence(__SCREAMING_SNAKE_CASE , col=__SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__lowerCAmelCase : Optional[Any] = copy.deepcopy(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = pa.array(OptimizedTypedSequence(__SCREAMING_SNAKE_CASE , col=__SCREAMING_SNAKE_CASE ) )
    assert get_base_dtype(arr.type ) == pa.int64()
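# Why the fallback above works: OptimizedTypedSequence narrows well-known
# columns (e.g. attention_mask to int8), but a value outside the narrow
# type's range forces the generic int64 path. Range sketch:
#
#   import numpy as np
#   assert np.iinfo(np.int8).max == 127   # 128 is the first value int8 can't hold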
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any ) -> Optional[Any]:
__lowerCAmelCase : int = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__SCREAMING_SNAKE_CASE ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Dict ) -> Any:
__lowerCAmelCase : Any = """mock://dataset-train.arrow"""
with ArrowWriter(path=__SCREAMING_SNAKE_CASE , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__SCREAMING_SNAKE_CASE ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__lowerCAmelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowerCAmelCase : Tuple = pa.BufferOutputStream()
with ParquetWriter(stream=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__lowerCAmelCase : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__lowerCAmelCase : str = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : pa.Table = pq.read_table(__SCREAMING_SNAKE_CASE )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
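# For comparison, the same round trip with plain pyarrow.parquet and no
# datasets wrapper (a minimal sketch):
#
#   import pyarrow as pa
#   import pyarrow.parquet as pq
#   buf = pa.BufferOutputStream()
#   pq.write_table(pa.table({"col_1": ["foo", "bar"], "col_2": [1, 2]}), buf)
#   assert pq.read_table(pa.BufferReader(buf.getvalue())).num_rows == 2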
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple ) -> Optional[int]:
import PIL.Image
__lowerCAmelCase : List[Any] = str(tmp_path / """test_image_rgb.jpg""" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(__SCREAMING_SNAKE_CASE , format="""png""" )
__lowerCAmelCase : List[str] = pa.BufferOutputStream()
with ParquetWriter(
stream=__SCREAMING_SNAKE_CASE , features=Features({"""image""": Image()} ) , embed_local_files=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
__lowerCAmelCase : List[str] = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : pa.Table = pq.read_table(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowerCAmelCase : Any = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__SCREAMING_SNAKE_CASE )] )
__lowerCAmelCase : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE ) as writer:
writer._build_writer(inferred_schema=__SCREAMING_SNAKE_CASE )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 370 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class snake_case_ ( unittest.TestCase ):
def __init__( self : List[Any] , _snake_case : List[Any] , _snake_case : str=13 , _snake_case : int=30 , _snake_case : str=2 , _snake_case : int=3 , _snake_case : Optional[Any]=True , _snake_case : str=True , _snake_case : Optional[int]=32 , _snake_case : Dict=5 , _snake_case : Optional[int]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]="gelu" , _snake_case : str=0.1 , _snake_case : str=0.1 , _snake_case : str=10 , _snake_case : Any=0.02 , )->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : int = image_size
__lowerCAmelCase : int = patch_size
__lowerCAmelCase : List[Any] = num_channels
__lowerCAmelCase : str = is_training
__lowerCAmelCase : str = use_labels
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Dict = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : Tuple = type_sequence_label_size
__lowerCAmelCase : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Tuple = (image_size // patch_size) ** 2
__lowerCAmelCase : Optional[Any] = num_patches + 1
def UpperCAmelCase__ ( self : Any )->Any:
'''simple docstring'''
__lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase__ ( self : List[Any] , _snake_case : List[str] , _snake_case : List[Any] )->Any:
'''simple docstring'''
__lowerCAmelCase : str = FlaxViTModel(config=_snake_case )
__lowerCAmelCase : int = model(_snake_case )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Dict = (self.image_size, self.image_size)
__lowerCAmelCase : Any = (self.patch_size, self.patch_size)
__lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
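    # Worked example of the sequence-length arithmetic above, using this
    # tester's defaults (image_size=30, patch_size=2):
    #   (30 // 2) * (30 // 2) = 225 patches, plus the [CLS] token = 226,
    # so last_hidden_state has shape (batch_size, 226, hidden_size).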
def UpperCAmelCase__ ( self : Dict , _snake_case : str , _snake_case : List[Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.type_sequence_label_size
__lowerCAmelCase : Tuple = FlaxViTForImageClassification(config=_snake_case )
__lowerCAmelCase : List[str] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase : str = 1
__lowerCAmelCase : Any = FlaxViTForImageClassification(_snake_case )
__lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : Dict = model(_snake_case )
def UpperCAmelCase__ ( self : str )->Any:
'''simple docstring'''
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = config_and_inputs
__lowerCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class snake_case_ ( FlaxModelTesterMixin ,unittest.TestCase ):
A_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : str )->None:
'''simple docstring'''
__lowerCAmelCase : List[str] = FlaxViTModelTester(self )
__lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCAmelCase__ ( self : Any )->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Any )->Any:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase__ ( self : Union[str, Any] )->str:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : int = model_class(_snake_case )
__lowerCAmelCase : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Tuple = [*signature.parameters.keys()]
__lowerCAmelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase__ ( self : str )->str:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : Optional[int] = self._prepare_for_class(_snake_case , _snake_case )
__lowerCAmelCase : List[str] = model_class(_snake_case )
@jax.jit
def model_jitted(_snake_case : Dict , **_snake_case : Union[str, Any] ):
return model(pixel_values=_snake_case , **_snake_case )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : List[Any] = model_jitted(**_snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : str = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
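        # The pattern above checks that compiled and eager execution agree.
        # Minimal standalone jax sketch of the same idea:
        #
        #   import jax
        #   import jax.numpy as jnp
        #   f = jax.jit(lambda x: x * 2)
        #   with jax.disable_jit():
        #       eager = f(jnp.ones(3))          # runs uncompiled
        #   assert jnp.allclose(f(jnp.ones(3)), eager)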
@slow
def UpperCAmelCase__ ( self : Dict )->str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__lowerCAmelCase : List[str] = model(np.ones((1, 3, 224, 224) ) )
        self.assertIsNotNone(_snake_case )
| 232 | 0
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class A__ :
A__ = 42
A__ = None
A__ = None
def _lowerCAmelCase ( ) -> Node | None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Node(1 )
_SCREAMING_SNAKE_CASE =Node(2 )
_SCREAMING_SNAKE_CASE =Node(3 )
_SCREAMING_SNAKE_CASE =Node(4 )
_SCREAMING_SNAKE_CASE =Node(5 )
return tree
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> Sequence[Node | None]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
if root is None:
return output
_SCREAMING_SNAKE_CASE =deque([root] )
while process_queue:
_SCREAMING_SNAKE_CASE =process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
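# Worked example (assuming the classic 5-node tree this module's demo intends:
# 1 at the root, children 2 and 3, with 4 and 5 as children of 2): the FIFO
# deque makes this a breadth-first visit, so level_order returns [1, 2, 3, 4, 5].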
def _lowerCAmelCase ( _UpperCamelCase : Node | None , _UpperCamelCase : int ) -> Sequence[Node | None]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
def populate_output(_UpperCamelCase : Node | None , _UpperCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _lowerCAmelCase ( _UpperCamelCase : Node | None , _UpperCamelCase : int ) -> Sequence[Node | None]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
def populate_output(_UpperCamelCase : Node | None , _UpperCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _lowerCAmelCase ( _UpperCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =height(_UpperCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_UpperCamelCase , _UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =1
else:
output.append(get_nodes_from_right_to_left(_UpperCamelCase , _UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =0
return output
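# Worked example for the same assumed 5-node tree: the flag flips direction on
# every level, so zigzag returns [[1], [3, 2], [4, 5]], i.e. left-to-right,
# then right-to-left, then left-to-right again.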
def _lowerCAmelCase ( ) -> None: # Main function for testing.
"""simple docstring"""
_SCREAMING_SNAKE_CASE =make_tree()
print(f"In-order Traversal: {inorder(_UpperCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_UpperCamelCase )}" )
print(f"Post-order Traversal: {postorder(_UpperCamelCase )}" , '\n' )
print(f"Height of Tree: {height(_UpperCamelCase )}" , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(_UpperCamelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(_UpperCamelCase ) + 1 ):
print(f"Level {level}:" , get_nodes_from_left_to_right(_UpperCamelCase , level=_UpperCamelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(_UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 47 |
'''simple docstring'''
class A__ :
def __init__( self : Union[str, Any] , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =[0] * size
_SCREAMING_SNAKE_CASE =[0] * size
@staticmethod
def A ( _a : int ) -> int:
'''simple docstring'''
return index | (index + 1)
@staticmethod
def A ( _a : int ) -> int:
'''simple docstring'''
return (index & (index + 1)) - 1
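    # Bit-trick note: get_next(i) = i | (i + 1) moves to the next node whose
    # range covers index i, and get_prev(i) = (i & (i + 1)) - 1 is the last
    # index before the block ending at i. Worked example for i = 5 (0b101):
    #   get_next(5) = 0b101 | 0b110 = 7
    #   get_prev(5) = (0b101 & 0b110) - 1 = 0b100 - 1 = 3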
def A ( self : Tuple , _a : int , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =value
while index < self.size:
_SCREAMING_SNAKE_CASE =self.get_prev(_a ) + 1
if current_left_border == index:
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =max(_a , _a , _a )
_SCREAMING_SNAKE_CASE =self.get_next(_a )
def A ( self : int , _a : int , _a : int ) -> int:
'''simple docstring'''
right -= 1 # Because of right is exclusive
_SCREAMING_SNAKE_CASE =0
while left <= right:
_SCREAMING_SNAKE_CASE =self.get_prev(_a )
if left <= current_left:
_SCREAMING_SNAKE_CASE =max(_a , self.tree[right] )
_SCREAMING_SNAKE_CASE =current_left
else:
_SCREAMING_SNAKE_CASE =max(_a , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase_( snake_case__: List[Any] , snake_case__: List[str] , snake_case__: Dict , snake_case__: Optional[int] ) -> Optional[int]:
UpperCAmelCase__ = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase__ = {
'wmt16-en-de-dist-12-1': [2_8.3, 2_7.5_2],
'wmt16-en-de-dist-6-1': [2_7.4, 2_7.1_1],
'wmt16-en-de-12-1': [2_6.9, 2_5.7_5],
}
UpperCAmelCase__ = f"{src_lang}-{tgt_lang}"
UpperCAmelCase__ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
UpperCAmelCase__ = os.path.join(snake_case__ , 'README.md' )
print(f"Generating {path}" )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(snake_case__ )
# make sure we are under the root of the project
_UpperCamelCase = Path(__file__).resolve().parent.parent.parent
_UpperCamelCase = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_UpperCamelCase = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
| 335 |
# flake8: noqa
# Lint as: python3
_UpperCamelCase = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class A__ ( FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractor specific tests
lowercase = None
lowercase = None
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a , 'feature_size' ) )
self.assertTrue(hasattr(a , 'sampling_rate' ) )
self.assertTrue(hasattr(a , 'padding_value' ) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a ) == len(a ) for x, y in zip(a , processed_features[input_name] ) ) )
lowerCAmelCase__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a )
lowerCAmelCase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCAmelCase__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a )
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Tuple = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCAmelCase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a )
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCAmelCase__ : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCamelCase ( self : Dict , a : List[str]=False ):
'''simple docstring'''
def _inputs_have_equal_length(a : List[Any] ):
lowerCAmelCase__ : str = len(input[0] )
for input_slice in input[1:]:
if len(a ) != length:
return False
return True
def _inputs_are_equal(a : Dict , a : Optional[int] ):
if len(a ) != len(a ):
return False
for input_slice_a, input_slice_a in zip(a , a ):
if not np.allclose(np.asarray(a ) , np.asarray(a ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=a )
lowerCAmelCase__ : Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase__ : List[str] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : str = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ : Dict = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ : str = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ : Tuple = self.feat_extract_tester.batch_size
lowerCAmelCase__ : Optional[Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ : Dict = feat_extract.pad(a , padding=a )
lowerCAmelCase__ : Dict = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(a , padding='longest' )
lowerCAmelCase__ : List[Any] = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(a , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ : int = input_a[input_name]
lowerCAmelCase__ : Tuple = feat_extract.pad(a , padding='longest' , return_tensors='np' )
lowerCAmelCase__ : Tuple = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(a ):
feat_extract.pad(a , padding='max_length' )[input_name]
lowerCAmelCase__ : Dict = feat_extract.pad(
a , padding='max_length' , max_length=a , return_tensors='np' )
lowerCAmelCase__ : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(a ) )
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertTrue(_inputs_are_equal(a , a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : Optional[int] = feat_extract.pad(a , pad_to_multiple_of=10 )
lowerCAmelCase__ : Tuple = input_a[input_name]
lowerCAmelCase__ : Dict = feat_extract.pad(a , padding='longest' , pad_to_multiple_of=10 )
lowerCAmelCase__ : Any = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(
a , padding='max_length' , pad_to_multiple_of=10 , max_length=a )
lowerCAmelCase__ : List[Any] = input_a[input_name]
lowerCAmelCase__ : Dict = feat_extract.pad(
a , padding='max_length' , pad_to_multiple_of=10 , max_length=a , return_tensors='np' , )
lowerCAmelCase__ : Tuple = input_a[input_name]
self.assertTrue(all(len(a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(a , a ) )
lowerCAmelCase__ : List[Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ : int = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def _lowerCamelCase ( self : Union[str, Any] , a : int=False ):
'''simple docstring'''
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_a , input_b ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a , input_b ):
                if not np.allclose(np.asarray(input_slice_a ) , np.asarray(input_slice_b ) , atol=1E-3 ):
                    return False
            return True
lowerCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=a )
lowerCAmelCase__ : List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ : Tuple = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=a )
lowerCAmelCase__ : Optional[int] = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(a , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ : List[Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertFalse(_inputs_have_equal_length(a ) )
# truncate to smallest with np
lowerCAmelCase__ : Dict = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=a , )
lowerCAmelCase__ : Tuple = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCAmelCase__ : str = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a ) )
# truncate to middle
lowerCAmelCase__ : Tuple = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=a , return_tensors='np' , )
lowerCAmelCase__ : int = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=a )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCAmelCase__ : Any = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertTrue(_inputs_are_equal(a , a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a ):
feat_extract.pad(a , truncation=a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a ):
feat_extract.pad(a , padding='longest' , truncation=a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a ):
feat_extract.pad(a , padding='longest' , truncation=a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(a ):
feat_extract.pad(a , padding='max_length' , truncation=a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : Union[str, Any] = 12
lowerCAmelCase__ : Dict = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a , truncation=a , )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(
a , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a , )
lowerCAmelCase__ : Dict = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ : int = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ : int = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(a ) )
self.assertFalse(_inputs_have_equal_length(a ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self._check_padding(numpify=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self._check_truncation(numpify=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self._check_truncation(numpify=a )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : str = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Dict = feat_extract.pad(a , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ : Dict = feat_extract.pad(a , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Any = feat_extract.model_input_names[0]
lowerCAmelCase__ : str = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : str = feat_extract.pad(a , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ : Union[str, Any] = feat_extract.pad(a , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = self.feat_extract_dict
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : List[str] = self.feature_extraction_class(**a )
lowerCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : str = [len(a ) for x in speech_inputs]
lowerCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Union[str, Any] = feat_extract.pad(a , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.feat_extract_dict
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : str = self.feature_extraction_class(**a )
lowerCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : List[str] = [len(a ) for x in speech_inputs]
lowerCAmelCase__ : Dict = feat_extract.model_input_names[0]
lowerCAmelCase__ : Any = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : List[Any] = min(a )
lowerCAmelCase__ : Dict = feat_extract.pad(
a , padding='max_length' , max_length=a , truncation=a , return_tensors='np' )
self.assertIn('attention_mask' , a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) | 212 |
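# Illustrative sketch (hypothetical helper, not the transformers API): the
# tests above exercise pad() with padding="longest"/"max_length" and
# pad_to_multiple_of. The arithmetic those assertions rely on is simply
# "pad to the longest length, optionally rounded up to the next multiple":
import numpy as np

def pad_batch(sequences, padding_value=0.0, pad_to_multiple_of=None):
    target = max(len(seq) for seq in sequences)
    if pad_to_multiple_of is not None and target % pad_to_multiple_of != 0:
        target = (target // pad_to_multiple_of + 1) * pad_to_multiple_of
    batch = np.full((len(sequences), target), padding_value, dtype=np.float32)
    for i, seq in enumerate(sequences):
        batch[i, : len(seq)] = seq
    return batch

padded = pad_batch([[1.0, 2.0, 3.0], [4.0]], pad_to_multiple_of=10)
assert padded.shape == (2, 10)  # longest length 3, rounded up to 10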
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : int = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase__ : Any = model_name.find('patch' )
lowerCAmelCase__ : Any = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase__ : Any = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Optional[int] = 3_072
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : Optional[int] = 1_024
lowerCAmelCase__ : List[Any] = 4_096
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Any = 24
lowerCAmelCase__ : Dict = 768
lowerCAmelCase__ : List[str] = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : str = 336
lowerCAmelCase__ : Any = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
lowerCAmelCase__ : List[Any] = 768
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase__ : Optional[Any] = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase__ : int = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase__ : List[str] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase__ : Tuple = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase__ : List[str] = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase__ : Optional[Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase__ : List[str] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase__ : Dict = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase__ : Any = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase__ : Dict = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase__ : Tuple = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase__ : Any = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase__ : str = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase__ : Tuple = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase__ : Dict = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase__ : List[str] = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase__ : List[Any] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ : Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
lowerCAmelCase__ : str = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase__ : str = key_split[3]
lowerCAmelCase__ : Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase__ : Any = val[
:dim, :
]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : str = val[
:dim
]
lowerCAmelCase__ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : Any = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase__ : str = val[
:dim, :
]
lowerCAmelCase__ : Tuple = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : List[Any] = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : int = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase__ : Dict = key_split[2]
lowerCAmelCase__ : str = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase__ : List[Any] = val[:dim, :]
lowerCAmelCase__ : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase__ : Any = val[-dim:, :]
else:
lowerCAmelCase__ : int = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCAmelCase__ : List[str] = val[-dim:]
else:
lowerCAmelCase__ : int = key_split[2]
lowerCAmelCase__ : Optional[Any] = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : int = val[:dim, :]
lowerCAmelCase__ : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Dict = val[-dim:, :]
else:
lowerCAmelCase__ : List[str] = val[:dim]
lowerCAmelCase__ : int = val[
dim : dim * 2
]
lowerCAmelCase__ : Optional[int] = val[-dim:]
else:
lowerCAmelCase__ : Optional[int] = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase__ : List[str] = val.T
lowerCAmelCase__ : List[str] = val
return orig_state_dict
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
if num_frames == 8:
lowerCAmelCase__ : Tuple = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase__ : Dict = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase__ : List[Any] = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase__ : Dict = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=SCREAMING_SNAKE_CASE_ , repo_type='dataset' , )
lowerCAmelCase__ : Optional[int] = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> str:
lowerCAmelCase__ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase__ : Dict = model_to_url[model_name]
lowerCAmelCase__ : List[str] = 8
if "16-frames" in model_name:
lowerCAmelCase__ : Any = 16
elif "shot" in model_name:
lowerCAmelCase__ : str = 32
lowerCAmelCase__ : str = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase__ : Any = 'pytorch_model.bin'
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
else:
lowerCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )['model']
lowerCAmelCase__ : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = XCLIPModel(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : str = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase__ : Optional[int] = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase__ : List[str] = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : List[Any] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : Optional[Any] = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = prepare_video(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
lowerCAmelCase__ : Optional[Any] = outputs.logits_per_video
lowerCAmelCase__ : Tuple = logits_per_video.softmax(dim=1 )
print('Probs:' , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase__ : str = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase__ : Dict = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase__ : int = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase__ : Any = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase__ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase__ : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase__ : int = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase__ : Dict = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase__ : Tuple = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase__ : int = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase__ : str = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase__ : int = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase__ : List[Any] = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 212 | 1 |
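# Illustrative sketch of the attention in_proj splitting performed in
# convert_state_dict above: checkpoints store a fused weight of shape
# (3 * dim, dim) that the converter slices into query/key/value projections.
# Shapes here are hypothetical.
import torch

def split_in_proj(weight, dim):
    query = weight[:dim, :]
    key = weight[dim : dim * 2, :]
    value = weight[-dim:, :]
    return query, key, value

fused = torch.randn(3 * 8, 8)
q, k, v = split_in_proj(fused, dim=8)
assert q.shape == k.shape == v.shape == (8, 8)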
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs ) | 356 |
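# Generic sketch of the deprecation shim above (hypothetical class names):
# subclass the replacement and emit a FutureWarning on construction so the
# old import path keeps working while delegating all behavior to the new
# class.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)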
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase ) | 329 | 0 |
def is_palindrome(num: int) -> bool:
        """Return True if num reads the same forwards and backwards."""
        return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
        """Return num plus the number formed by reversing its digits."""
        return int(num) + int(str(num)[::-1])
def solution(limit: int = 10000) -> int:
        """Count numbers below limit that do not become palindromic within 50
        reverse-and-add iterations (Lychrel candidates, Project Euler 55)."""
        lychrel_nums = []
        for num in range(1, limit):
                iterations = 0
                current = num
                while iterations < 50:
                        current = sum_reverse(current)
                        iterations += 1
                        if is_palindrome(current):
                                break
                else:
                        lychrel_nums.append(num)
        return len(lychrel_nums)
if __name__ == "__main__":
print(f"{solution() = }")
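# Self-contained sanity check of the reverse-and-add step above: 47 + 74 =
# 121 is already a palindrome, so 47 resolves in one iteration and is not a
# Lychrel candidate.
n = 47
step = n + int(str(n)[::-1])
assert step == 121 and str(step) == str(step)[::-1]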
| 205 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = r'\w+[.]\d+'
lowercase = re.findall(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for pat in pats:
lowercase = key.replace(__SCREAMING_SNAKE_CASE , '_'.join(pat.split('.' ) ) )
return key
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
lowercase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=42 ):
# Step 1: Convert pytorch tensor to numpy
lowercase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase = flax_model.init_weights(PRNGKey(__SCREAMING_SNAKE_CASE ) )
lowercase = flatten_dict(__SCREAMING_SNAKE_CASE )
lowercase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase = rename_key(__SCREAMING_SNAKE_CASE )
lowercase = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
lowercase , lowercase = rename_key_and_reshape_tensor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
lowercase = jnp.asarray(__SCREAMING_SNAKE_CASE )
return unflatten_dict(__SCREAMING_SNAKE_CASE )
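# Minimal sketch of the linear-layer convention handled above: PyTorch
# nn.Linear stores weights as (out_features, in_features), while Flax Dense
# kernels are (in_features, out_features), hence the transpose before the
# parameter is renamed to "kernel".
import numpy as np

pt_weight = np.arange(6, dtype=np.float32).reshape(2, 3)  # (out=2, in=3)
flax_kernel = pt_weight.T                                 # (in=3, out=2)
assert flax_kernel.shape == (3, 2)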
| 195 | 0 |
"""simple docstring"""
def lowercase ( word : str ) -> str:
    return "".join(chr(ord(char ) - 32 ) if '''a''' <= char <= '''z''' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
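# The helper above relies on the ASCII layout: lowercase letters sit exactly
# 32 code points after their uppercase counterparts, so subtracting 32
# uppercases a-z and leaves every other character untouched.
assert ord("a") - ord("A") == 32
assert chr(ord("q") - 32) == "Q"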
| 11 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , list_a , list_b , tol ):
        self.assertEqual(len(list_a ) , len(list_b ) )
        for a, b in zip(list_a , list_b ):
            self.assertAlmostEqual(a , b , delta=tol )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
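# Minimal NumPy sketch of the accumulation pattern the test above checks
# (hypothetical class, not transformers' GradientAccumulator): gradients are
# summed per call, `step` counts calls, and reset() clears the state.
import numpy as np

class TinyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [np.array(g, dtype=np.float64) for g in grads]
        else:
            for acc, grad in zip(self.gradients, grads):
                acc += grad
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = None

acc = TinyAccumulator()
acc([np.array([1.0, 2.0])])
acc([np.array([-2.0, 1.0])])
acc([np.array([-1.0, 2.0])])
assert acc.step == 3
assert np.allclose(acc.gradients[0], [-2.0, 5.0])  # matches the test values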
| 11 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
__A = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
__A = "</w>"
__A = "@@ "
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: Any = set()
__lowerCAmelCase: Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase: Any = char
return pairs
# Speech2Text2 has no max input length
__A = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class snake_case ( A_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Tuple , )-> int:
'''simple docstring'''
super().__init__(
unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , )
__lowerCAmelCase: Optional[int] = do_lower_case
with open(_snake_case , encoding="utf-8") as vocab_handle:
__lowerCAmelCase: List[str] = json.load(_snake_case)
__lowerCAmelCase: Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(f"No merges file provided. {self.__class__.__name__} can only be used for decoding.")
__lowerCAmelCase: Union[str, Any] = None
__lowerCAmelCase: int = None
else:
with open(_snake_case , encoding="utf-8") as merges_handle:
__lowerCAmelCase: Tuple = merges_handle.read().split("\n")[:-1]
__lowerCAmelCase: Optional[int] = [tuple(merge.split()[:2]) for merge in merges]
__lowerCAmelCase: Tuple = dict(zip(_snake_case , range(len(_snake_case))))
__lowerCAmelCase: Any = {}
@property
def lowercase_ ( self : Optional[int])-> int:
'''simple docstring'''
return len(self.decoder)
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowerCAmelCase: List[str] = get_pairs(_snake_case)
if not pairs:
return token
while True:
            __lowerCAmelCase: List[str] = min(_snake_case , key=lambda pair: self.bpe_ranks.get(pair , float("inf")))
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase: Tuple = bigram
__lowerCAmelCase: int = []
__lowerCAmelCase: int = 0
while i < len(_snake_case):
try:
__lowerCAmelCase: Any = word.index(_snake_case , _snake_case)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__lowerCAmelCase: Tuple = j
if word[i] == first and i < len(_snake_case) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__lowerCAmelCase: List[Any] = tuple(_snake_case)
__lowerCAmelCase: Dict = new_word
if len(_snake_case) == 1:
break
else:
__lowerCAmelCase: List[Any] = get_pairs(_snake_case)
__lowerCAmelCase: Optional[Any] = ''' '''.join(_snake_case)
if word == "\n " + BPE_TOKEN_MERGES:
__lowerCAmelCase: Union[str, Any] = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(_snake_case):
__lowerCAmelCase: Optional[int] = word.replace(_snake_case , "")
__lowerCAmelCase: Union[str, Any] = word.replace(" " , _snake_case)
__lowerCAmelCase: Optional[Any] = word
return word
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Dict)-> Optional[int]:
'''simple docstring'''
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding.")
if self.do_lower_case:
__lowerCAmelCase: Tuple = text.lower()
__lowerCAmelCase: Optional[Any] = text.split()
__lowerCAmelCase: Dict = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_snake_case).split(" ")))
return split_tokens
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str)-> int:
'''simple docstring'''
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token))
def lowercase_ ( self : Dict , UpperCamelCase__ : int)-> str:
'''simple docstring'''
__lowerCAmelCase: List[Any] = self.decoder.get(_snake_case , self.unk_token)
return result
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ''' '''.join(_snake_case)
# make sure @@ tokens are concatenated
__lowerCAmelCase: Tuple = ''''''.join(string.split(_snake_case))
return string
def lowercase_ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None)-> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCAmelCase: str = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: int = os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(_snake_case , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_snake_case , ensure_ascii=_snake_case) + "\n")
__lowerCAmelCase: List[str] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_snake_case , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
__lowerCAmelCase: Tuple = token_index
writer.write(" ".join(_snake_case) + "\n")
index += 1
return (vocab_file, merges_file)
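# Self-contained illustration of the "@@ " continuation marker handled by the
# string-joining step above: tokens are joined with spaces and the marker is
# stripped to restore whole words.
tokens = ["hel@@", "lo", "wor@@", "ld"]
assert " ".join(tokens).replace("@@ ", "") == "hello world"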
| 217 |
"""simple docstring"""
import os
def solution() -> int:
        with open(os.path.dirname(__file__) + '''/p022_names.txt''' ) as file:
                names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
        names.sort()
        name_score = 0
        total_score = 0
        for i, name in enumerate(names ):
                for letter in name:
                        name_score += ord(letter ) - 64
                total_score += (i + 1) * name_score
                name_score = 0
        return total_score
if __name__ == "__main__":
print(solution())
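# Worked example of the scoring above, taken from the Project Euler problem
# statement: COLIN -> 3 + 15 + 12 + 9 + 14 = 53, and at 1-based position 938
# in the sorted list it contributes 938 * 53 = 49714.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714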
| 16 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
__a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowerCamelCase ( self ):
__a : str = self.dummy_uncond_unet
__a : Any = PNDMScheduler()
__a : str = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
__a : Optional[int] = torch.manual_seed(0 )
__a : Dict = pndm(generator=_a , num_inference_steps=20 , output_type='''numpy''' ).images
__a : Any = torch.manual_seed(0 )
__a : int = pndm(generator=_a , num_inference_steps=20 , output_type='''numpy''' , return_dict=_a )[0]
__a : str = image[0, -3:, -3:, -1]
__a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Optional[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Dict = '''google/ddpm-cifar10-32'''
__a : List[str] = UNetaDModel.from_pretrained(_a )
__a : Tuple = PNDMScheduler()
__a : int = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
__a : List[Any] = torch.manual_seed(0 )
__a : Optional[Any] = pndm(generator=_a , output_type='''numpy''' ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Dict = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 350 |
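# The test above seeds torch's RNG before each pipeline call so the two runs
# are comparable element-wise. Simplified, self-contained illustration of
# that determinism pattern (global seeding instead of a Generator object):
import torch

torch.manual_seed(0)
first = torch.randn(3)
torch.manual_seed(0)
second = torch.randn(3)
assert torch.equal(first, second)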
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 188 | 0 |
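# Generic sketch of the lazy-import pattern behind _LazyModule above
# (simplified, hypothetical implementation): attribute access triggers the
# real import on first use and caches the result on the module object.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_map):
        super().__init__(name)
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, attr):
        module = importlib.import_module(self._import_map[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = TinyLazyModule("demo", {"sqrt": "math"})
assert lazy.sqrt(9.0) == 3.0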
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Dict = logging.get_logger()
def _lowerCamelCase ( lowercase : int , lowercase : str , lowercase : LevitConfig , lowercase : Path , lowercase : bool = True ) -> Optional[int]:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
_a = timm.create_model("levit_128s" , pretrained=lowercase )
else:
_a = timm.create_model("levit_128" , pretrained=lowercase )
if hidden_sizes == 192:
_a = timm.create_model("levit_192" , pretrained=lowercase )
if hidden_sizes == 256:
_a = timm.create_model("levit_256" , pretrained=lowercase )
if hidden_sizes == 384:
_a = timm.create_model("levit_384" , pretrained=lowercase )
from_model.eval()
_a = LevitForImageClassificationWithTeacher(lowercase ).eval()
_a = OrderedDict()
_a = from_model.state_dict()
_a = list(from_model.state_dict().keys() )
_a = list(our_model.state_dict().keys() )
print(len(lowercase ) , len(lowercase ) )
for i in range(len(lowercase ) ):
_a = weights[og_keys[i]]
our_model.load_state_dict(lowercase )
_a = torch.randn((2, 3, 224, 224) )
_a = from_model(lowercase )
_a = our_model(lowercase ).logits
assert torch.allclose(lowercase , lowercase ), "The model logits don't match the original one."
_a = name
print(lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_a = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _lowerCamelCase ( lowercase : Path , lowercase : str = None , lowercase : bool = True ) -> List[Any]:
_a = "imagenet-1k-id2label.json"
_a = 1000
_a = (1, num_labels)
_a = "huggingface/label-files"
_a = num_labels
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
_a = partial(lowercase , num_labels=lowercase , idalabel=lowercase , labelaid=lowercase )
_a = {
"levit-128S": 128,
"levit-128": 128,
"levit-192": 192,
"levit-256": 256,
"levit-384": 384,
}
_a = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , lowercase , names_to_config[model_name] , lowercase , lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , lowercase , lowercase , lowercase , lowercase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
lowerCAmelCase_ : List[str] = parser.parse_args()
lowerCAmelCase_ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
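# Sketch of the label-map plumbing in the conversion above: JSON object keys
# are always strings, so ids are cast back to int and the mapping is inverted
# to obtain label2id. The two labels below are hypothetical placeholders.
raw = {"0": "tench", "1": "goldfish"}
id2label = {int(k): v for k, v in raw.items()}
label2id = {v: k for k, v in id2label.items()}
assert id2label[1] == "goldfish" and label2id["tench"] == 0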
| 63 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase )
_a = tok.pad_token_id
def get_lens(lowercase : Optional[int] ):
_a = tqdm(
DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a = []
for batch in dl:
_a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist()
_a = batch["labels"].ne(lowercase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase , lowercase ):
max_lens.append(max(lowercase , lowercase ) )
else:
max_lens.extend(lowercase )
return max_lens
_a = get_lens(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase )
_a = get_lens(lowercase )
pickle_save(lowercase , train_ds.len_file )
pickle_save(lowercase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
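# Self-contained illustration of the length computation in get_lens above:
# non-pad tokens per row are counted with ne(pad_token_id).sum(1).
import torch

pad_token_id = 0
batch = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])
assert batch.ne(pad_token_id).sum(1).tolist() == [3, 2]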
| 63 | 1 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__A =1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=16 , lowercase=13 , lowercase=7 , lowercase=14 , lowercase=10 , lowercase=19 , lowercase=5 , lowercase=4 , lowercase=True , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=[1, 2, 3, 4, 5] , lowercase=25 , lowercase=5 , ) -> Tuple:
lowerCamelCase_ = d_model
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = prediction_length
lowerCamelCase_ = context_length
lowerCamelCase_ = cardinality
lowerCamelCase_ = num_time_features
lowerCamelCase_ = lags_sequence
lowerCamelCase_ = embedding_dimension
lowerCamelCase_ = is_training
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = context_length
lowerCamelCase_ = prediction_length + label_length
lowerCamelCase_ = label_length
lowerCamelCase_ = moving_average
lowerCamelCase_ = autocorrelation_factor
def SCREAMING_SNAKE_CASE_( self ) -> int:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = config.context_length + max(config.lags_sequence )
lowerCamelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = self.prepare_autoformer_inputs_dict(lowercase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Any:
lowerCamelCase_ = AutoformerModel(config=lowercase ).to(lowercase ).eval()
lowerCamelCase_ = model(**lowercase )
lowerCamelCase_ = outputs.encoder_last_hidden_state
lowerCamelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_encoder()
encoder.save_pretrained(lowercase )
lowerCamelCase_ = AutoformerEncoder.from_pretrained(lowercase ).to(lowercase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model.create_network_inputs(**lowercase )
lowerCamelCase_ , lowerCamelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ = encoder(inputs_embeds=lowercase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
lowerCamelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_decoder()
decoder.save_pretrained(lowercase )
lowerCamelCase_ = AutoformerDecoder.from_pretrained(lowercase ).to(lowercase )
lowerCamelCase_ = decoder(
trend=lowercase , inputs_embeds=lowercase , encoder_hidden_states=lowercase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
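# The cached batches are dicts of tensors whose keys match the model's forward
# arguments ("past_values", "past_time_features", ...), with a batch size of
# 64, which is what the shape assertions in the integration tests below assume.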
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")

        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )

        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
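# For reference, a minimal forecasting recipe built on the same checkpoint and
# batch format as the tests above (a sketch, assuming `batch` comes from
# `prepare_batch("val-batch.pt")`):
#
#     model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#     outputs = model.generate(
#         past_values=batch["past_values"],
#         past_time_features=batch["past_time_features"],
#         past_observed_mask=batch["past_observed_mask"],
#         static_categorical_features=batch["static_categorical_features"],
#         future_time_features=batch["future_time_features"],
#     )
#     point_forecast = outputs.sequences.mean(dim=1)  # average over sampled trajectories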
| 371 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
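# Summary of the variants handled above: "yolos_ti" is the tiny model (hidden
# size 192), "yolos_s_200_pre"/"yolos_s_300_pre" are small (hidden size 384),
# "yolos_s_dWr" is the narrower-but-deeper small variant (hidden size 330,
# 14 layers), and "yolos_b" keeps YolosConfig's ViT-Base defaults with an
# 800x1344 input resolution. All variants predict the 91 COCO detection classes.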
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
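# Concrete example of the slicing above: with hidden_size = 384, the fused qkv
# weight has shape (1152, 384); rows 0:384 become the query projection,
# rows 384:768 the key projection, and the last 384 rows the value projection.
# The fused bias is split the same way.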
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
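# Example: a timm-style key such as "backbone.blocks.0.attn.proj.weight" passes
# through the rules above and comes out as
# "vit.encoder.layer.0.attention.output.dense.weight".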
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv matrices are split into separate query/key/value projections
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
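# This is the standard COCO val2017 image (two cats on a couch) used throughout
# the transformers conversion scripts for sanity-checking model outputs.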
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
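# Example invocation (a sketch: paths are illustrative, the script file name is
# assumed, and the .pth checkpoint must come from the original YOLOS release):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path ./yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small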
| 47 | 0 |