import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_case_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
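# Usage sketch (illustrative, not part of the test suite; assumes network
# access to the "flaubert/flaubert_base_cased" checkpoint). Mirrors the
# integration test above but lets the tokenizer produce the ids.
if __name__ == "__main__":
    from transformers import FlaubertTokenizer

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    flaubert = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
    encoded = tokenizer("Le camembert est délicieux !", return_tensors="pt")
    with torch.no_grad():
        hidden = flaubert(**encoded)[0]
    print(hidden.shape)  # torch.Size([1, sequence_length, 768])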
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
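# Usage sketch (assumption: a local "data/" tree shaped like
# data/<label_name>/<clip>.wav). The generic "audiofolder" loader routes
# through the AudioFolder builder above and infers labels from folder names.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("audiofolder", data_dir="data")
    print(dataset["train"][0]["audio"])  # {"path": ..., "array": ..., "sampling_rate": ...}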
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step,
    e.g. to avoid stepping the scheduler when a mixed-precision gradient overflow skipped the optimizer step.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
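# Usage sketch (assumption: standalone use; in practice `Accelerator.prepare`
# constructs this wrapper for you). With `step_with_optimizer=False` the
# wrapper simply forwards `step` to the underlying scheduler.
if __name__ == "__main__":
    import torch

    linear = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(linear.parameters(), lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    wrapped = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
    optimizer.step()
    wrapped.step()
    print(wrapped.get_last_lr())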
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
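# Usage sketch: a minimal reverse-diffusion loop with DDPMScheduler. A zero
# tensor stands in for the noise prediction a trained UNet would return.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=50)
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])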
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
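# Usage sketch (assumption: called from inside the transformers package, since
# `deps` comes from the generated dependency table). Raises when the installed
# version falls outside the pinned range.
if __name__ == "__main__":
    dep_version_check("numpy")
    dep_version_check("tqdm", hint="try: pip install -U tqdm")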
def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of the numbers in `nums`."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
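# Worked example: for [4, 6, 5] the average is 5, the absolute deviations are
# 1, 1 and 0, so the mean absolute deviation is 2 / 3 ≈ 0.6667.
if __name__ == "__main__":
    print(mean_absolute_deviation([4, 6, 5]))  # 0.666...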
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
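# Usage sketch (assumption: hub access to "moussaKam/mbarthez"): encode a
# French sentence and round-trip it through decode.
if __name__ == "__main__":
    barthez = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    ids = barthez.encode("Le camembert est délicieux.")
    print(ids)
    print(barthez.decode(ids, skip_special_tokens=True))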
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        # Pad up to the next multiple of `size` on the bottom and right edges.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
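# Usage sketch: pad a 21x30 RGB array up to the next multiples of `pad_size`
# (24x32) and rescale pixel values to [0, 1]; random data stands in for a
# real image.
if __name__ == "__main__":
    processor = Swin2SRImageProcessor()
    image = (np.random.rand(21, 30, 3) * 255).astype(np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 24, 32)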
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
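# Example invocation (the script name and paths are placeholders for a
# downloaded original HiFi-GAN generator checkpoint and its stats.npy):
#
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan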
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that strips the `<command> [<args>]` stub from the usage line."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
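# Usage sketch: prompt on stdin for a mixed-precision choice and convert the
# numeric answer into a PrecisionType value.
if __name__ == "__main__":
    mixed_precision = _ask_field(
        "Mixed precision? [0] no, [1] fp16, [2] bf16, [3] fp8: ",
        convert_value=_convert_mixed_precision,
        default="no",
        error_message="Please enter 0, 1, 2, or 3.",
    )
    print(mixed_precision)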
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
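# Usage sketch (assumption: hub access). Same model, but with ids produced by
# a tokenizer rather than hard-coded.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    camembert_tokenizer = AutoTokenizer.from_pretrained("jplu/tf-camembert-base")
    camembert = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
    encoded = camembert_tokenizer("J'aime le camembert !", return_tensors="tf")
    print(camembert(encoded).last_hidden_state.shape)  # (1, sequence_length, 768)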
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
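# Usage sketch: with the lazy module in place, the heavy torch-backed classes
# are imported only when first accessed, e.g.:
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")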
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
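# Usage sketch (assumption: hub access to "roberta-base"): the well-known
# round-trip "Hello world" -> [0, 31414, 232, 2].
if __name__ == "__main__":
    roberta_tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    enc = roberta_tok("Hello world")
    print(enc.input_ids)  # [0, 31414, 232, 2]
    print(roberta_tok.decode(enc.input_ids))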
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
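
# Illustrative sketch (added for clarity, not part of the original test file):
# the Trainer routes every lifecycle event to each registered callback by
# calling the hook method of the same name, which is why MyTestTrainerCallback
# above only appends the hook's name. MiniCallbackHandler is a hypothetical,
# stripped-down stand-in for transformers.trainer_callback.CallbackHandler.
class MiniCallbackHandler:
    def __init__(self, callbacks):
        self.callbacks = callbacks

    def fire(self, event, args=None, state=None, control=None, **kwargs):
        for callback in self.callbacks:
            hook = getattr(callback, event, None)
            if hook is not None:
                hook(args, state, control, **kwargs)

# Example: MiniCallbackHandler([MyTestTrainerCallback()]).fire("on_train_begin")
# appends "on_train_begin" to that callback's `events` list.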
| 12 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("test requires Fairseq")
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("test requires transformers")
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("test not supported on Windows")
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a real model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.return_value = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
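
# Minimal sketch of the registry-decorator pattern used by
# register_intensive_calls_patcher above; the names here are hypothetical. A
# classmethod returns a decorator that wraps a generator function in
# contextmanager() and stores it in a class-level dict keyed by metric name,
# so the test can later look up and enter the right patcher.
from contextlib import contextmanager as _contextmanager

class _PatcherRegistry:
    PATCHERS = {}

    @classmethod
    def register(cls, name):
        def decorator(func):
            patcher = _contextmanager(func)
            cls.PATCHERS[name] = patcher
            return patcher
        return decorator

@_PatcherRegistry.register("demo")
def _patch_demo(module_name):
    yield  # any mock.patch() calls would wrap this yield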
| 7 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""")
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowercase__ : Optional[int] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = i
lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def lowercase__ ( self):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def lowercase__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ : List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False
lowercase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = ["""的""", """人""", """有"""]
lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = False
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ : Any = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_)
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
lowercase__ : Optional[int] = """你好,你是谁"""
lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
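
# Hedged usage sketch (the file paths are assumptions): RoCBert keeps three
# aligned vocabularies, so a single token maps to a token id, a shape id, and a
# pronunciation id, and all three sequences are fed to the model together.
# tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
# tokens = tokenizer.tokenize("你好")
# input_ids = tokenizer.convert_tokens_to_ids(tokens)
# shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
# pronunciation_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)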
| 12 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        # Construct a T5-style layernorm module: no bias and no subtraction of mean.
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, computed in float32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input):
        # tanh approximation of GELU, as used in Google BERT and GPT.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
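
# Quick shape check of the FiLM operation x * (1 + scale) + shift on dummy
# tensors; the sizes below are illustrative only and assume d_model = 768.
if __name__ == "__main__":
    film = T5FiLMLayer(in_features=768 * 4, out_features=768)
    x = torch.randn(2, 16, 768)        # (batch, seq_len, d_model)
    cond = torch.randn(2, 1, 768 * 4)  # (batch, 1, 4 * d_model)
    print(film(x, cond).shape)         # torch.Size([2, 16, 768])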
| 8 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: predicts bounding boxes for an image
    given a set of text `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
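
# Hedged usage sketch (the checkpoint name is an assumption, not pinned by this
# file): the pipeline yields one model pass per candidate label and merges the
# per-label detections in postprocess().
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]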
| 12 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
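
# Sketch of why replicate()/shard() appear above: model params are copied to
# every device while the batch is split across devices, matching what the
# jitted (pmap-style) pipeline call expects. Shapes are illustrative only.
# n = jax.device_count()
# batch = jnp.zeros((n, 77), dtype=jnp.int32)  # one prompt row per device
# shard(batch).shape   # (n, 1, 77): a leading device axis is added
# replicate(params)    # every parameter leaf gains a leading axis of size n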
| 9 |
def mf_knapsack(i, wt, val, j):
    """
    Memoized (top-down) 0/1 knapsack: best value using the first i items with
    remaining capacity j, caching results in the global table f.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """
    Solves the integer-weights knapsack problem and returns one of the several
    possible optimal subsets (as a set of 1-based item indices).
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """
    Recursively reconstructs one optimal subset from the filled dp table: item i
    belongs to an optimal subset at capacity j exactly when dp[i][j] differs
    from dp[i - 1][j].
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
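
# Standalone illustration of the same lazy-import idea using module-level
# __getattr__ (PEP 562); this is a sketch, not how _LazyModule is implemented.
# import importlib
#
# _LAZY = {"ReformerModel": ".modeling_reformer"}
#
# def __getattr__(name):
#     if name in _LAZY:
#         return getattr(importlib.import_module(_LAZY[name], __name__), name)
#     raise AttributeError(name)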
| 10 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA, so they are skipped
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ : List[str] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
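
# Hypothetical invocation (all paths are placeholders, not shipped with this script):
# python convert_flava_original_pytorch_to_hf.py \
#     --checkpoint_path flava_full.ckpt \
#     --codebook_path flava_codebook.pt \
#     --pytorch_dump_folder_path ./flava-hf
# The renaming can be sanity-checked on a fake state dict, e.g.
# upgrade_state_dict({"mm_text_projection.weight": torch.ones(1)}, {}) returns
# {"flava.text_to_mm_projection.weight": tensor([1.])}.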
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ConditionalDetrFeatureExtractor"]
lowercase_ = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
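
# Note: the TYPE_CHECKING branch above gives static type checkers the real
# imports while runtime access stays lazy. Sketch of the effect:
# import transformers
# transformers.ConditionalDetrModel  # first access triggers the real import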
| 11 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size"""))
    def test_batch_feature( self):
        '''simple docstring'''
        pass
    def test_call_pil( self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_numpy( self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
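# --- Illustrative sketch (assumption: this mirrors, but is NOT, the imported
# --- `prepare_image_inputs` helper from test_image_processing_common): random
# --- test images returned as PIL, numpy, or torch, as the tests above expect.
def sketch_prepare_image_inputs(tester, equal_resolution=False, numpify=False, torchify=False):
    rng = np.random.default_rng(0)
    images = []
    for _ in range(tester.batch_size):
        if equal_resolution:
            height = width = tester.max_resolution
        else:
            height, width = rng.integers(tester.min_resolution, tester.max_resolution, size=2)
        images.append(rng.integers(0, 2_56, size=(height, width, tester.num_channels), dtype=np.uint8))
    if numpify:
        return images
    if torchify:
        # torch image processors expect channels-first tensors
        return [torch.from_numpy(image).permute(2, 0, 1) for image in images]
    return [Image.fromarray(image) for image in images]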
| 12 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_60_00,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '\n' )
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
    def get_tokenizer( self , **kwargs_init ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Dict = self.get_feature_extractor()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self ) -> int:
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['xx'] )
        with self.assertRaisesRegex(ValueError , 'include' ):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Dict = self.get_feature_extractor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : Any = self.get_decoder()
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = floats_list((3, 10_00) )
__lowerCamelCase : Union[str, Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Union[str, Any] = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = self.get_feature_extractor()
__lowerCamelCase : Optional[int] = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = 'This is a test string'
__lowerCamelCase : Any = processor(text=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = self.get_feature_extractor()
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Optional[Any] = self.get_decoder()
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowerCamelCase : Optional[int] = processor.decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Optional[int] = self.get_feature_extractor()
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowerCamelCase : List[str] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__lowerCamelCase : str = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__lowerCamelCase : Dict = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : str = self.get_feature_extractor()
__lowerCamelCase : str = self.get_tokenizer()
__lowerCamelCase : Any = self.get_decoder()
__lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self._get_dummy_logits()
__lowerCamelCase : int = 15
__lowerCamelCase : Dict = -2_0.0
__lowerCamelCase : Optional[Any] = -4.0
__lowerCamelCase : List[Any] = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : List[Any] = decoded_processor_out.text
__lowerCamelCase : Optional[int] = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__lowerCamelCase : List[Any] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
__lowerCamelCase : Tuple = [d[0][2] for d in decoded_decoder_out]
__lowerCamelCase : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[Any] = self.get_feature_extractor()
__lowerCamelCase : int = self.get_tokenizer()
__lowerCamelCase : int = self.get_decoder()
__lowerCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = self._get_dummy_logits()
__lowerCamelCase : Union[str, Any] = 2.0
__lowerCamelCase : str = 5.0
__lowerCamelCase : int = -2_0.0
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Any = decoded_processor_out.text
__lowerCamelCase : int = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__lowerCamelCase : Union[str, Any] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase : str = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCamelCase : Optional[int] = os.listdir(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = processor.decoder.model_container[processor.decoder._model_key]
__lowerCamelCase : int = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCamelCase : str = os.listdir(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> str:
__lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Dict = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Tuple = floats_list((3, 10_00) )
__lowerCamelCase : Optional[Any] = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__lowerCamelCase : Optional[int] = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowerCamelCase : int = self._get_dummy_logits()
__lowerCamelCase : Tuple = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = self.get_feature_extractor()
__lowerCamelCase : List[str] = self.get_tokenizer()
__lowerCamelCase : List[Any] = self.get_decoder()
__lowerCamelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Optional[Any] = self._get_dummy_logits()[0]
__lowerCamelCase : Dict = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCamelCase : Union[str, Any] = self._get_dummy_logits()
__lowerCamelCase : Tuple = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self ) -> Union[str, Any]:
import torch
__lowerCamelCase : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00 ) )
__lowerCamelCase : List[str] = iter(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = next(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__lowerCamelCase : Tuple = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowerCamelCase : List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__lowerCamelCase : Optional[int] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowerCamelCase : Optional[int] = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__lowerCamelCase : Optional[Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__lowerCamelCase : str = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__lowerCamelCase : Tuple = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__lowerCamelCase : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__lowerCamelCase : str = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
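# --- Worked example of the offset-to-seconds arithmetic used in the slow test
# --- above (assumption: illustrative numbers; wav2vec2-base downsamples raw
# --- audio by a factor of 320 and its feature extractor runs at 16 kHz).
def _demo_word_offset_times():
    time_offset = 320 / 1_60_00  # seconds of audio per logit frame (0.02 s)
    word_offsets = [{'word': 'hello', 'start_offset': 71, 'end_offset': 83}]
    timestamps = [
        {
            'start_time': d['start_offset'] * time_offset,
            'end_time': d['end_offset'] * time_offset,
            'word': d['word'],
        }
        for d in word_offsets
    ]
    assert round(timestamps[0]['start_time'] , 2 ) == 1.42
    assert round(timestamps[0]['end_time'] , 2 ) == 1.66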
| 13 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days , absent , late ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
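# Quick sanity check: the Project Euler 191 statement says there are exactly
# 43 four-day prize strings, which the recursion above should reproduce.
assert _calculate(4, absent=0, late=0) == 43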
| 12 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
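# Usage sketch for the renaming rule above (hypothetical key tuples; a rank-3
# "kernel" is treated as an expert weight and permuted, a rank-2 one transposed).
def _demo_rename_base_flax_keys():
    expert = torch.zeros(8, 16, 32)  # (num_experts, d_in, d_out)
    key, tensor = rename_base_flax_keys(("mlp", "wi", "kernel"), expert)
    assert key == ("mlp", "wi", "weight") and tuple(tensor.shape) == (8, 32, 16)
    linear = torch.zeros(16, 32)  # flax stores (d_in, d_out); torch expects (d_out, d_in)
    key, tensor = rename_base_flax_keys(("mlp", "wo", "kernel"), linear)
    assert key == ("mlp", "wo", "weight") and tuple(tensor.shape) == (32, 16)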
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block , save_path ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path , save_path , max_shard_size , dtype , weights_name: str = WEIGHTS_NAME ):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(save_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path_ = os.path.join(
                save_path , weights_name.replace(".bin" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path_ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path_ = os.path.join(save_path , weights_name.replace(".bin" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path_ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(save_path , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(save_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(save_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = TaTokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
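# Worked example of the shard-size bookkeeping used in `shard_on_the_fly` above
# (illustrative numbers; `dtype_byte_size` and `convert_file_size_to_int` are the
# real transformers helpers already imported at the top of this script).
def _demo_shard_budget():
    max_shard_size = convert_file_size_to_int("10GB" )  # -> 10 * 1000**3 bytes
    weight = torch.zeros(4096 , 4096 , dtype=torch.bfloat16 )
    weight_size = weight.numel() * dtype_byte_size(weight.dtype )  # elements * bytes/element
    assert weight_size == 4096 * 4096 * 2
    # a new shard starts once the running block would overflow this budget
    assert weight_size < max_shard_size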
| 14 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    '''simple docstring'''
    raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest( nn.Module ):
    def __init__( self):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest( unittest.TestCase ):
    def test_base_case( self):
        '''simple docstring'''
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8])
    def test_base_case_with_args( self):
        '''simple docstring'''
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""")
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8])
        self.assertListEqual([bs, arga] , [8, """hello"""])
    def test_start_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_approach_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_verbose_guard( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arg1 , arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_28 , """hello""" , """world""")
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0])
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0])
    def test_any_other_error( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("""Oops, we had an error!""")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0])
@require_cuda
    def test_release_memory( self):
        '''simple docstring'''
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory)
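# Illustrative sketch (assumption: a simplified stand-in, NOT accelerate's
# actual implementation) of the halving strategy tested above: retry the
# wrapped function, cutting the batch size in half on each fake OOM.
def sketch_find_executable_batch_size(function, starting_batch_size=1_28):
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("""No executable batch size found, reached zero.""")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "CUDA out of memory." in e.args[0]:
                    batch_size //= 2  # halve and retry
                else:
                    raise
    return wrapper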
| 12 | 0 |
from __future__ import annotations
solution = []
def is_safe( board: list[list[int]] , row: int , column: int ) -> bool:
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve( board: list[list[int]] , row: int ) -> bool:
    """simple docstring"""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board: list[list[int]] ) -> None:
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
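# Sanity note: for n = 8 the classic result is 92 solutions, so `len(solution)`
# printed above should be 92.
assert len(solution) == 92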
| 15 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config( checkpoint_url ):
    '''simple docstring'''
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""
    return config
def rename_key( name , config ):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name
    return name
def convert_state_dict( orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F'Unexpected key {key} in state_dict' )
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_26 if """Jpeg""" in checkpoint_url else 2_56
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1E-3 )
    print("""Looks ok!""" )
    url_to_name = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(F'caidas/{model_name}' )
        processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
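# Worked example of the fused-qkv split performed in `convert_state_dict` above
# (illustrative tensor; the fused weight stacks query, key, value along dim 0).
def _demo_split_qkv():
    dim = 4  # stand-in for config.embed_dim
    val = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    # stitching the three slices back together reproduces the fused weight
    assert torch.equal(torch.cat([query, key, value], dim=0), val)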
| 12 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaVaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaVaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaVaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_deberta_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaVaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        self.model_tester = DebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_deberta_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_deberta_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_deberta_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_deberta_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    def test_deberta_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained ( self : Optional[int] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def test_inference_masked_lm ( self : List[Any] ):
pass
@slow
def test_inference_no_head ( self : Tuple ):
SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" ) | 16 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
config : BigBirdConfig
dtype : jnp.dtype = jnp.float32
add_pooling_layer : bool = True
def lowercase__ ( self):
'''simple docstring'''
super().setup()
self.cls = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
outputs = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
cls_out = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions ( FlaxBigBirdForQuestionAnswering ):
module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq ( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ) -> int:
'''simple docstring'''
def cross_entropy(logits , labels , reduction=None ):
vocab_size = logits.shape[-1]
labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("""f4""" )
logits = jax.nn.log_softmax(logits , axis=-1 )
loss = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
loss = reduction(loss )
return loss
cross_entropy = partial(cross_entropy , reduction=jnp.mean )
start_loss = cross_entropy(start_logits , start_labels )
end_loss = cross_entropy(end_logits , end_labels )
pooled_loss = cross_entropy(pooled_logits , pooled_labels )
return (start_loss + end_loss + pooled_loss) / 3
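# Editor's note (added example): the loss above averages three one-hot
# cross-entropy terms (start, end, pooled category). The core term can be
# sanity-checked in isolation; with uniform logits over 5 classes it equals
# exactly log(5) (function name illustrative, not from the source):
import jax
import jax.numpy as jnp

def one_hot_xent(logits, labels):
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    return jnp.mean(-jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1))

# one_hot_xent(jnp.zeros((2, 5)), jnp.array([1, 3])) == jnp.log(5.0)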
@dataclass
class Args :
# NOTE: field names reconstructed from how they are used below;
# block_size / num_random_blocks are assumptions (not referenced in this excerpt).
model_id : str = "google/bigbird-roberta-base"
logging_steps : int = 3_000
save_steps : int = 10_500
block_size : int = 128
num_random_blocks : int = 3
batch_size_per_device : int = 1
max_epochs : int = 5
# tx_args
lr : float = 3e-5
init_lr : float = 0.0
warmup_steps : int = 20_000
weight_decay : float = 0.0_095
save_dir : str = "bigbird-roberta-natural-questions"
base_dir : str = "training-expt"
tr_data_path : str = "data/nq-training.jsonl"
val_data_path : str = "data/nq-validation.jsonl"
def __post_init__( self):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=True)
self.save_dir = os.path.join(self.base_dir , self.save_dir)
self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator :
pad_id : int
max_length : int = 4_096 # no dynamic padding on TPUs
def __call__( self , batch):
'''simple docstring'''
batch = self.collate_fn(batch)
batch = jax.tree_util.tree_map(shard , batch)
return batch
def collate_fn ( self , features):
'''simple docstring'''
input_ids , attention_mask = self.fetch_inputs(features["""input_ids"""])
batch = {
"""input_ids""": jnp.array(input_ids , dtype=jnp.int32),
"""attention_mask""": jnp.array(attention_mask , dtype=jnp.int32),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.int32),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.int32),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.int32),
}
return batch
def fetch_inputs ( self , input_ids):
'''simple docstring'''
inputs = [self._fetch_inputs(ids) for ids in input_ids]
return zip(*inputs)
def _fetch_inputs ( self , input_ids):
'''simple docstring'''
attention_mask = [1 for _ in range(len(input_ids))]
while len(input_ids) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
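# Editor's note (added example): the helper above right-pads each sequence to
# `max_length` with `pad_id` and builds a matching attention mask. The same
# idea as a tiny standalone function (hypothetical helper, not from the source):
def pad_to(input_ids: list, max_length: int, pad_id: int = 0):
    attention_mask = [1] * len(input_ids)
    input_ids = input_ids + [pad_id] * (max_length - len(input_ids))
    attention_mask = attention_mask + [0] * (max_length - len(attention_mask))
    return input_ids, attention_mask

# pad_to([5, 6, 7], 5) -> ([5, 6, 7, 0, 0], [1, 1, 1, 0, 0])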
def get_batched_dataset ( dataset , batch_size , seed=None ) -> Optional[Any]:
'''simple docstring'''
if seed is not None:
dataset = dataset.shuffle(seed=seed )
for i in range(len(dataset ) // batch_size ):
batch = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(batch )
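# Editor's note (added example): the generator above yields fixed-size batches
# and silently drops the ragged tail; the same behaviour over a plain list:
def batches(items, batch_size):
    for i in range(len(items) // batch_size):
        yield items[i * batch_size : (i + 1) * batch_size]

# list(batches([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4]]  (5 is dropped)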
@partial(jax.pmap , axis_name="""batch""" )
def train_step ( state , drp_rng , **model_inputs ) -> int:
'''simple docstring'''
def loss_fn(params ):
start_labels = model_inputs.pop("""start_labels""" )
end_labels = model_inputs.pop("""end_labels""" )
pooled_labels = model_inputs.pop("""pooled_labels""" )
outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
start_logits , end_logits , pooled_logits = outputs
return state.loss_fn(
start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
drp_rng , new_drp_rng = jax.random.split(drp_rng )
grad_fn = jax.value_and_grad(loss_fn )
loss , grads = grad_fn(state.params )
metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
grads = jax.lax.pmean(grads , """batch""" )
state = state.apply_gradients(grads=grads )
return state, metrics, new_drp_rng
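# Editor's note (added example): inside the pmapped step, jax.lax.pmean averages
# the loss and gradients across devices under the named axis. That collective in
# isolation (runs on however many devices are visible; names illustrative):
import jax
import jax.numpy as jnp
from functools import partial

@partial(jax.pmap, axis_name="batch")
def device_mean(x):
    return jax.lax.pmean(x, axis_name="batch")

# xs = jnp.arange(jax.device_count(), dtype=jnp.float32)
# device_mean(xs)  # every entry equals the mean of xs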
@partial(jax.pmap , axis_name="""batch""" )
def val_step ( state , **model_inputs ) -> str:
'''simple docstring'''
start_labels = model_inputs.pop("""start_labels""" )
end_labels = model_inputs.pop("""end_labels""" )
pooled_labels = model_inputs.pop("""pooled_labels""" )
outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
start_logits , end_logits , pooled_logits = outputs
loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class TrainState ( train_state.TrainState ):
loss_fn : Callable = struct.field(pytree_node=False )
@dataclass
class Trainer :
args : Args
data_collator : Callable
train_step_fn : Callable
val_step_fn : Callable
model_save_fn : Callable
logger : wandb
scheduler_fn : Callable = None
def create_state ( self , model , tx , num_train_steps , ckpt_dir=None):
'''simple docstring'''
params = model.params
state = TrainState.create(
apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
if ckpt_dir is not None:
params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state)
tx_args = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
tx , lr = build_tx(**tx_args)
state = train_state.TrainState(
step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
self.args = args
self.data_collator = data_collator
self.scheduler_fn = lr
model.params = params
state = jax_utils.replicate(state)
return state
def train ( self , state , tr_dataset , val_dataset):
'''simple docstring'''
args = self.args
total = len(tr_dataset) // args.batch_size
rng = jax.random.PRNGKey(0)
drp_rng = jax.random.split(rng , jax.device_count())
for epoch in range(args.max_epochs):
running_loss = jnp.array(0 , dtype=jnp.float32)
tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch)
i = 0
for batch in tqdm(tr_dataloader , total=total , desc=f'Running EPOCH-{epoch}'):
batch = self.data_collator(batch)
state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
state_step = jax_utils.unreplicate(state.step)
tr_loss = running_loss.item() / i
lr = self.scheduler_fn(state_step - 1)
eval_loss = self.evaluate(state , val_dataset)
logging_dict = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(logging_dict))
self.logger.log(logging_dict , commit=True)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=state)
def evaluate ( self , state , dataset):
'''simple docstring'''
dataloader = get_batched_dataset(dataset , self.args.batch_size)
total = len(dataset) // self.args.batch_size
running_loss = jnp.array(0 , dtype=jnp.float32)
i = 0
for batch in tqdm(dataloader , total=total , desc="""Evaluating ... """):
batch = self.data_collator(batch)
metrics = self.val_step_fn(state , **batch)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def save_checkpoint ( self , save_dir , state):
'''simple docstring'''
state = jax_utils.unreplicate(state)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(save_dir , params=state.params)
with open(os.path.join(save_dir , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(save_dir , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(save_dir , """data_collator.joblib"""))
with open(os.path.join(save_dir , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , f)
print("""DONE""")
def restore_checkpoint ( save_dir , state ) -> Optional[Any]:
'''simple docstring'''
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(save_dir , """flax_model.msgpack""" ) , """rb""" ) as f:
params = from_bytes(state.params , f.read() )
with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """rb""" ) as f:
opt_state = from_bytes(state.opt_state , f.read() )
args = joblib.load(os.path.join(save_dir , """args.joblib""" ) )
data_collator = joblib.load(os.path.join(save_dir , """data_collator.joblib""" ) )
with open(os.path.join(save_dir , """training_state.json""" ) , """r""" ) as f:
training_state = json.load(f )
step = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
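# Editor's note (added example): the checkpoint helpers rely on flax's msgpack
# (de)serialization, where `from_bytes` needs a template pytree with the right
# structure. The round trip in isolation (values illustrative):
import jax.numpy as jnp
from flax.serialization import from_bytes, to_bytes

template = {"dense": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}
restored = from_bytes(template, to_bytes(template))  # same structure and values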
def scheduler_fn ( lr , init_lr , warmup_steps , num_train_steps ) -> Tuple:
'''simple docstring'''
decay_steps = num_train_steps - warmup_steps
warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
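# Editor's note (added example): the schedule above ramps linearly from init_lr
# to lr over `warmup_steps`, then decays linearly toward 1e-7. Sampling a small
# instance shows the shape (all numbers illustrative):
import optax

warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
sched = optax.join_schedules(schedules=[warmup, decay], boundaries=[100])
# sched(0) ~ 0.0, sched(100) ~ 3e-5 (peak), sched(1000) ~ 1e-7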
def build_tx ( lr , init_lr , warmup_steps , num_train_steps , weight_decay ) -> Optional[int]:
'''simple docstring'''
def weight_decay_mask(params ):
params = traverse_util.flatten_dict(params )
mask = {k: (k[-1] != """bias""" and k[-2:] != ("""LayerNorm""", """scale""")) for k in params}
return traverse_util.unflatten_dict(mask )
lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
return tx, lr
| 12 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs ( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
soup = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
job_title = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
company_name = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 17 |
lowerCamelCase__ : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
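# Editor's note (added example): mappings like the one above are typically
# substituted into doc/notebook templates with plain string replacement; a
# minimal sketch of that expansion step (function name hypothetical):
def expand_placeholders(text: str, replacements: dict) -> str:
    for placeholder, value in replacements.items():
        text = text.replace(placeholder, value)
    return text

# expand_placeholders("use {model_class} here", {"{model_class}": "FakeModelClass"})
# -> "use FakeModelClass here"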
| 12 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Optional[Any] = "xlm-prophetnet"
__lowerCamelCase : Dict = ["past_key_values"]
__lowerCamelCase : Tuple = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self , activation_dropout = 0.1 , activation_function = "gelu" , vocab_size = 30522 , hidden_size = 1024 , encoder_ffn_dim = 4096 , num_encoder_layers = 12 , num_encoder_attention_heads = 16 , decoder_ffn_dim = 4096 , num_decoder_layers = 12 , num_decoder_attention_heads = 16 , attention_dropout = 0.1 , dropout = 0.1 , max_position_embeddings = 512 , init_std = 0.02 , is_encoder_decoder = True , add_cross_attention = True , decoder_start_token_id = 0 , ngram = 2 , num_buckets = 32 , relative_max_distance = 128 , disable_ngram_loss = False , eps = 0.0 , use_cache = True , pad_token_id = 0 , bos_token_id = 1 , eos_token_id = 2 , **kwargs , ) -> str:
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.encoder_ffn_dim = encoder_ffn_dim
self.num_encoder_layers = num_encoder_layers
self.num_encoder_attention_heads = num_encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.num_decoder_layers = num_decoder_layers
self.num_decoder_attention_heads = num_decoder_attention_heads
self.max_position_embeddings = max_position_embeddings
self.init_std = init_std # Normal(0, this parameter)
self.activation_function = activation_function
# parameters for xlmprophetnet
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.disable_ngram_loss = disable_ngram_loss
self.eps = eps
# 3 Types of Dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.dropout = dropout
self.use_cache = use_cache
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
def num_hidden_layers ( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def num_hidden_layers ( self , _lowerCAmelCase ) -> int:
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 18 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.rotary_dim = rotary_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs ( self):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def prepare_config_and_inputs_for_common ( self):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , attention_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def check_use_cache_forward ( self , model_class_name , config , input_ids , attention_mask):
'''simple docstring'''
max_decoder_length = 20
model = model_class_name(config)
past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
position_ids = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
outputs_cache = model(
input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
outputs_cache_next = model(
input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
outputs = model(input_ids)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
def check_use_cache_forward_with_attn_mask ( self , model_class_name , config , input_ids , attention_mask):
'''simple docstring'''
max_decoder_length = 20
model = model_class_name(config)
attention_mask_cache = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
position_ids = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
outputs_cache = model(
input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
outputs_cache_next = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
outputs = model(input_ids , attention_mask=attention_mask)
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
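# Editor's note (added example): both cache checks above compare a small logit
# slice from the incremental (cached) pass against the full forward pass via a
# max-absolute-difference tolerance; that comparison in isolation:
import numpy as np

def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.max(np.abs(a - b)))

# assert max_abs_diff(cached_logits, full_logits) < 1e-3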
@require_flax
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCAmelCase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def setUp( self):
'''simple docstring'''
self.model_tester = FlaxGPTJModelTester(self)
def test_use_cache_forward( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask)
def test_use_cache_forward_with_attn_mask( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
model_class_name , config , input_ids , attention_mask)
@tooslow
def test_batch_generation( self):
'''simple docstring'''
lowercase__ : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
lowercase__ : List[str] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : Optional[Any] = False
lowercase__ : List[str] = model.config.eos_token_id
lowercase__ : List[Any] = jax.jit(model.generate)
lowercase__ : Tuple = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(output_string , expected_string)
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax( self):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ : Dict = pt_inputs["""input_ids"""].shape
lowercase__ : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : str = 0
lowercase__ : List[Any] = 1
lowercase__ : Dict = 0
lowercase__ : Any = 1
pt_model = pt_model_class(config).eval()
fx_model = model_class(config , dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(fx_outputs , pt_outputs):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
self.assertEqual(
len(fx_outputs_loaded) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt( self):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
pt_model = pt_model_class(config).eval()
fx_model = model_class(config , dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
batch_size , seq_length = pt_inputs["""input_ids"""].shape
rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
pt_inputs["""attention_mask"""][batch_idx, :start_index] = 0
pt_inputs["""attention_mask"""][batch_idx, start_index:] = 1
prepared_inputs_dict["""attention_mask"""][batch_idx, :start_index] = 0
prepared_inputs_dict["""attention_mask"""][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(fx_outputs , pt_outputs):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
self.assertEqual(
len(fx_outputs) , len(pt_outputs_loaded) , """Output lengths differ between Flax and PyTorch""")
for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ : Any = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
lowercase__ : int = model(np.ones((1, 1)))
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
| 12 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( number, iterations ) -> str:
"""simple docstring"""
if not isinstance(iterations, int ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(number, int ) or not number >= 1:
raise ValueError(
'''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
out = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
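# Editor's note (added example): a compact re-implementation to cross-check the
# helper above — it should produce the classic token stream for 1..15
# (function name illustrative, semantics matched to the while-loop above):
def fizz_buzz_reference(start: int, stop: int) -> str:
    out = []
    for n in range(start, stop + 1):
        word = ("Fizz" if n % 3 == 0 else "") + ("Buzz" if n % 5 == 0 else "")
        out.append(word or str(n))
    return " ".join(out) + " "

# fizz_buzz_reference(1, 15).split()[-1] == "FizzBuzz"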
| 19 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['image_processor', 'tokenizer']
__lowerCAmelCase : Union[str, Any] = 'AutoImageProcessor'
__lowerCAmelCase : int = 'AutoTokenizer'
def __init__( self , image_processor , tokenizer):
'''simple docstring'''
super().__init__(image_processor , tokenizer)
self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
if text is not None and images is not None:
encoding["""pixel_values"""] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
@property
def lowercase__ ( self):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 12 | 0 |
def solution( limit : int = 100_0000 ):
phi =[i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 20 |
def fibonacci ( n ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
sequence = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def fibonacci_digits_index ( n ) -> int:
'''simple docstring'''
digits = 0
index = 2
while digits < n:
index += 1
digits = len(str(fibonacci(index ) ) )
return index
def solution ( n = 10_00 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
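# Editor's note (added example): the search above recomputes the whole sequence
# for every candidate index (quadratic work); a single-pass equivalent that
# tracks only the last two terms gives the same answer:
def first_fib_index_with_digits(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

# first_fib_index_with_digits(3) == 12  (144 is the first 3-digit Fibonacci number)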
| 12 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
UpperCAmelCase_ : str = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ : List[str] = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ : List[Any] = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
UpperCAmelCase_ : Optional[Any] = model.state_dict()
compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ : str = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
UpperCAmelCase_ : Tuple = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase_ : List[Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
UpperCAmelCase_ : List[str] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
UpperCAmelCase_ : int = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
UpperCAmelCase_ : Optional[Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
UpperCAmelCase_ : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
UpperCAmelCase_ : List[str] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
UpperCAmelCase_ : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
UpperCAmelCase_ : Dict = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
UpperCAmelCase_ : Any = state_dict["cls.predictions.decoder.weight"]
UpperCAmelCase_ : Tuple = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ : int = state_dict[F"""cls.predictions.transform.dense.{w}"""]
UpperCAmelCase_ : Optional[Any] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
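# Editor's note (added example): the extraction above copies teacher layers
# 0, 2, 4, 7, 9, 11 into consecutive student slots, which amounts to rewriting
# the layer index in each parameter key; that renaming step in isolation
# (helper name hypothetical):
def student_key(teacher_key: str, teacher_idx: int, student_idx: int) -> str:
    return teacher_key.replace(f"layer.{teacher_idx}.", f"layer.{student_idx}.")

# student_key("bert.encoder.layer.7.output.dense.weight", 7, 3)
# -> "bert.encoder.layer.3.output.dense.weight"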
| 21 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set."""
def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any:
'''simple docstring'''
lowercase__ : Any = Path(lowercase_ )
path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
mixed_precision = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
config = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
config["""num_processes"""] = num_gpus
config["""use_cpu"""] = False
if num_gpus > 1:
config["""distributed_type"""] = """MULTI_GPU"""
else:
config["""distributed_type"""] = """NO"""
elif is_xpu_available() and use_xpu:
num_xpus = torch.xpu.device_count()
config["""num_processes"""] = num_xpus
config["""use_cpu"""] = False
if num_xpus > 1:
config["""distributed_type"""] = """MULTI_XPU"""
else:
config["""distributed_type"""] = """NO"""
elif is_npu_available():
num_npus = torch.npu.device_count()
config["""num_processes"""] = num_npus
config["""use_cpu"""] = False
if num_npus > 1:
config["""distributed_type"""] = """MULTI_NPU"""
else:
config["""distributed_type"""] = """NO"""
else:
num_gpus = 0
config["""use_cpu"""] = True
config["""num_processes"""] = 1
config["""distributed_type"""] = """NO"""
config = ClusterConfig(**config )
config.to_json_file(path )
return path
def default_command_parser ( parser , parents ) -> Optional[Any]:
'''simple docstring'''
parser = parser.add_parser("""default""" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
"""--config_file""" , default=lowercase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=config_command )
return parser
def config_command ( args ) -> Any:
'''simple docstring'''
config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
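# Editor's note (added example): accelerate ships an equivalent public helper;
# the snippet above mirrors it, so the same default config could be produced with
# (path illustrative, call left commented to avoid side effects):
from accelerate.utils import write_basic_config as hf_write_basic_config

# hf_write_basic_config(mixed_precision="no", save_location="/tmp/default_config.json")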
| 12 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : Dict = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = do_rescale
_a = do_normalize
_a = do_center_crop
_a = crop_size
_a = size
_a = resample
_a = rescale_factor
_a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "shortest_edge" in size:
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_a = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ )
if not is_batched(lowerCAmelCase_ ):
_a = [images]
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
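# Editor's note (added example): the preprocessing chain implemented above is
# resize -> center-crop -> rescale -> normalize. The last two stages, written
# out explicitly on a raw numpy image (helper name and values illustrative):
import numpy as np

def rescale_and_normalize(image: np.ndarray, mean, std, scale: float = 1 / 255) -> np.ndarray:
    image = image.astype(np.float32) * scale            # bring pixels into [0, 1]
    return (image - np.array(mean)) / np.array(std)     # per-channel standardize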
| 22 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 'convbert'
def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
'''simple docstring'''
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.head_ratio = head_ratio
self.conv_kernel_size = conv_kernel_size
self.num_groups = num_groups
self.classifier_dropout = classifier_dropout
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 12 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_MASKED_LM_MAPPING
A_ = TF_MODEL_FOR_MASKED_LM_MAPPING
def tearDown ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def test_small_model_tf ( self ) -> str:
unmasker = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
outputs = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
outputs = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
outputs = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def test_small_model_pt ( self ) -> Union[str, Any]:
unmasker = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
outputs = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
outputs = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
outputs = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
outputs = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(outputs , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def test_fp16_casting ( self ) -> Optional[Any]:
pipe = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
outputs = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(outputs , list )
@slow
@require_torch
def test_large_model_pt ( self ) -> List[Any]:
unmasker = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(unmasker )
@slow
@require_tf
def test_large_model_tf ( self ) -> Union[str, Any]:
unmasker = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(unmasker )
def run_large_test ( self , unmasker ) -> Tuple:
outputs = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(outputs ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = fill_masker.tokenizer
UpperCamelCase_ = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_UpperCAmelCase ) , 3 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
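# Minimal usage sketch (hypothetical standalone snippet) of the pipeline exercised
# above; the tiny checkpoint is the same one used by the fast tests:
# unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
# unmasker("My name is <mask>")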
| 23 |
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
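# Usage sketch (the data_dir path is illustrative): the builder above backs
# `load_dataset("audiofolder", ...)`, which infers class labels from subdirectory names.
# from datasets import load_dataset
# dataset = load_dataset("audiofolder", data_dir="/path/to/audio/folder")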
| 12 | 0 |
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)

    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
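# Example invocation (file paths are illustrative):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text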
| 24 |
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
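# Standalone sketch (shapes illustrative, not part of the test suite): one reverse
# diffusion step with a DDPMScheduler, mirroring the loop in the full-loop tests.
if __name__ == "__main__":
    sketch_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    sketch_sample = torch.randn(1, 3, 32, 32)
    sketch_residual = torch.randn(1, 3, 32, 32)  # stands in for a model's noise prediction
    sketch_sample = sketch_scheduler.step(
        sketch_residual, 999, sketch_sample, generator=torch.manual_seed(0)
    ).prev_sample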
| 12 | 0 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 25 |
def average_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
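# Worked example: for [4, 1, 3, 2] the mean is 2.5, the absolute deviations are
# 1.5, 1.5, 0.5, 0.5, and their mean is 4.0 / 4 = 1.0, so
# average_absolute_deviation([4, 1, 3, 2]) returns 1.0.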
| 12 | 0 |
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit (sample) standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
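# Quick check of the two transforms above (expected values computed by hand):
if __name__ == "__main__":
    print(normalization([2, 4, 6]))  # [0.0, 0.5, 1.0]
    print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]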
| 26 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
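# Usage sketch (shapes illustrative): padding bumps each spatial dim up to the
# next multiple of `pad_size`, here 61 -> 64.
if __name__ == "__main__":
    sketch_processor = Swin2SRImageProcessor(pad_size=8)
    sketch_image = np.random.randint(0, 256, (3, 61, 61), dtype=np.uint8)
    sketch_batch = sketch_processor(images=sketch_image)
    print(sketch_batch["pixel_values"][0].shape)  # (3, 64, 64)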
| 12 | 0 |
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 27 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
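# Illustrative interactive call (the prompt text is hypothetical):
# mixed_precision = _ask_options(
#     "Do you wish to use mixed precision?",
#     ["no", "fp16", "bf16", "fp8"],
#     _convert_mixed_precision,
# )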
| 12 | 0 |
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
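# Usage sketch outside the test harness (model id as in the slow tests above):
# classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cats.png", candidate_labels=["cat", "plane", "remote"])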
| 28 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]


if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
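# With the lazy module installed in sys.modules, submodules are only imported on
# first attribute access, e.g. (illustrative):
# from transformers.models.mgp_str import MgpstrConfig  # loads configuration_mgp_str lazily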
| 12 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
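# Standalone sketch: the warpers above compose through FlaxLogitsProcessorList
# exactly as a generation loop would apply them (names as imported above):
# warper = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
# scores = warper(input_ids, scores, cur_len=cur_len)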
| 29 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _snake_case ( UpperCAmelCase_ ):
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = []
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_init_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_evaluate""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_predict""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_save""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_log""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
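# Illustrative note (added, not part of the original test): with num_train_epochs=1,
# four steps per epoch, and no logging/eval/save triggers, get_expected_events above
# predicts (and the recording callback observes):
#   ["on_init_end", "on_train_begin", "on_epoch_begin"]
#   + ["on_step_begin", "on_step_end"] * 4
#   + ["on_epoch_end", "on_log", "on_train_end"]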
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a binary grid (1 = passable cell) via Dijkstra.

    Returns (distance, path); every move costs 1, so the distance is the
    number of steps from ``source`` to ``destination``.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Relax the edge if the neighbour is passable and closer via (x, y).
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
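# Minimal usage sketch (added for illustration, not part of the original module):
#
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#   # dist == 6.0 and path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]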
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowercase__ : Optional[int] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = i
lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
def lowercase__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ : List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False
lowercase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = ["""的""", """人""", """有"""]
lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = False
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ : Any = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_)
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
lowercase__ : Optional[int] = """你好,你是谁"""
lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
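# Note (added for clarity, not in the original file): unlike a plain BERT tokenizer,
# RoCBertTokenizer maps every token to three parallel id sequences -- token ids,
# glyph-shape ids, and pronunciation ids -- which is why the tests above exercise
# convert_tokens_to_ids, convert_tokens_to_shape_ids, and
# convert_tokens_to_pronunciation_ids side by side.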
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
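# Worked example (added for illustration): partition(7) encodes each multiset of
# primes summing to 7 as the product of its members, so
#   7 -> 7,   5 + 2 -> 10,   3 + 2 + 2 -> 12
# and partition(7) == {7, 10, 12}, i.e. 7 has three prime partitions. solution()
# scans upward for the first number with more than 5000 such partitions
# (this is Project Euler problem 77).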
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)

        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
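# Typical usage sketch (added; the checkpoint name is an assumption for illustration --
# any zero-shot object detection model such as OWL-ViT works):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat",
#   #      "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]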
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=16 , _UpperCamelCase=[32, 64, 128] , _UpperCamelCase=[1, 2, 1] , _UpperCamelCase=[2, 2, 4] , _UpperCamelCase=2 , _UpperCamelCase=2.0 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase="gelu" , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=10 , _UpperCamelCase=8 , _UpperCamelCase=["stage1", "stage2"] , _UpperCamelCase=[1, 2] , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCamelCase( self ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = FocalNetForMaskedImageModeling(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForMaskedImageModeling(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__A : List[str] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__A : Union[str, Any] = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__A : int = False
__A : int = False
__A : Dict = False
__A : str = False
__A : Tuple = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def UpperCamelCase( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ):
return
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def UpperCamelCase( self ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(_UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# FocalNet has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape
_UpperCAmelCase = (
reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
@slow
def UpperCamelCase( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = FocalNetModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCamelCase( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(_UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor([0.2166, -0.4368, 0.2191] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
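# Inference sketch mirroring the integration test above (added; checkpoint and the
# expected class index are taken from that test -- ImageNet index 281 is "tabby cat"):
#
#   from transformers import AutoImageProcessor, FocalNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#   model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#   inputs = processor(images=image, return_tensors="pt")   # image: a PIL image
#   logits = model(**inputs).logits                         # shape (1, 1000)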
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down, memoized) variant of 0/1 knapsack."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # Item i belongs to an optimal subset at capacity j exactly when including it
    # changed the optimum, i.e. dp[i][j] != dp[i - 1][j].
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self:List[Any] , _a:Any=1_00 , _a:List[str]=5 , _a:Optional[Any]=1 , _a:List[Any]=1 , _a:Optional[int]=2_49 , _a:Dict=6 , _a:int=17 , _a:Union[str, Any]=25 , _a:Optional[Any]=4 , _a:Union[str, Any]=4 , _a:int=1_28 , _a:Optional[Any]=0.1 , _a:Tuple=0.1 , _a:List[Any]=0.1 , _a:List[str]=0.0006 , _a:List[str]=5_12 , _a:Tuple=0.02 , _a:Optional[Any]=1e-12 , _a:Tuple=1 , _a:Dict=True , _a:str=1 , _a:Union[str, Any]=5_02_56 , _a:List[str]=5_02_56 , **_a:List[str] , ):
snake_case__ = vocab_size
snake_case__ = action_weight
snake_case__ = reward_weight
snake_case__ = value_weight
snake_case__ = max_position_embeddings
snake_case__ = block_size
snake_case__ = action_dim
snake_case__ = observation_dim
snake_case__ = transition_dim
snake_case__ = learning_rate
snake_case__ = n_layer
snake_case__ = n_head
snake_case__ = n_embd
snake_case__ = embd_pdrop
snake_case__ = attn_pdrop
snake_case__ = resid_pdrop
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = kaiming_initializer_range
snake_case__ = use_cache
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
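# Note (added): thanks to attribute_map above, generic code can read
# config.hidden_size, config.num_attention_heads, and config.num_hidden_layers,
# which transparently resolve to the stored n_embd, n_head, and n_layer values.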
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoints
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original FLAVA weights into the transformers design."""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")

    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
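# Example invocation (added; paths and script name are placeholders for illustration):
#
#   python convert_flava_original_checkpoint_to_hf.py \
#       --checkpoint_path /path/to/flava_checkpoint.pt \
#       --codebook_path /path/to/flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf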
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '▁'
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
SCREAMING_SNAKE_CASE_ = {
'google/pegasus-xsum': 512,
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PegasusTokenizer
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_="<pad>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<mask_2>" , lowerCamelCase_="<mask_1>" , lowerCamelCase_=None , lowerCamelCase_=1_0_3 , **lowerCamelCase_ , ) -> Optional[int]:
UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
raise TypeError(
F'additional_special_tokens should be of type {type(lowerCamelCase_)}, but is'
F' {type(lowerCamelCase_)}')
UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(lowerCamelCase_) , self.offset - 1)
]
if len(set(lowerCamelCase_)) != len(lowerCamelCase_):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
UpperCamelCase = additional_special_tokens_extended
else:
UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset)]
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , pad_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , mask_token_sent=lowerCamelCase_ , offset=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
UpperCamelCase = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
F' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase_)
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_):
copyfile(self.vocab_file , lowerCamelCase_)
        return (out_vocab_file,)
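# Usage sketch (added; the checkpoint name is taken from PRETRAINED_VOCAB_FILES_MAP
# above, and the class defined here is known upstream as PegasusTokenizerFast):
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("Summarize this article.").input_ids
#   # build_inputs_with_special_tokens appends a single EOS id; no BOS is added.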
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
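    # The three tests below exercise the same (batch, channels, height, width)
    # shape contract for PIL, NumPy and PyTorch inputs; `prepare_image_inputs`
    # builds random images in the requested format.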
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
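    # `sigma_min`/`sigma_max` bound the noise (sigma) schedule that the
    # consistency-model sampler discretises into `num_train_timesteps` steps.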
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the prize strings reachable from this state, given the number of
    days left and the absences/consecutive-late-days accumulated so far."""
    # if we have been late three days in a row, or absent twice in total,
    # no prize string can be formed from this point on
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Returns the number of possible prize strings for the given number of
    days, using a memoised recursion."""
    return _calculate(days, absent=0, late=0)
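# Worked example (Project Euler 191): over a 4-day period, 43 of the 81 possible
# trinary attendance strings qualify for a prize, and this memoised recursion
# reproduces that:
#
#   >>> solution(4)
#   43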
if __name__ == "__main__":
print(solution())
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
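    # Minimal usage sketch (illustrative only; it assumes this class is loaded as a
    # diffusers community pipeline and the checkpoint name is just an example):
    #
    #   pipe = DiffusionPipeline.from_pretrained(
    #       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
    #   )
    #   pipe.enable_attention_slicing()
    #   image = pipe("an astronaut riding a horse").images[0]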
@torch.no_grad()
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        text_embeddings=None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
# get prompt text embeddings
snake_case : Any = self.tokenizer(
SCREAMING_SNAKE_CASE_ ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
snake_case : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case , snake_case , snake_case : Optional[int] = text_embeddings.shape
snake_case : Any = text_embeddings.repeat(1 ,SCREAMING_SNAKE_CASE_ ,1 )
snake_case : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt ,SCREAMING_SNAKE_CASE_ ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : List[str]
if negative_prompt is None:
snake_case : int = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !="""
F""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
elif isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : Dict = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
snake_case : Optional[Any] = negative_prompt
snake_case : List[str] = text_input_ids.shape[-1]
snake_case : List[str] = self.tokenizer(
SCREAMING_SNAKE_CASE_ ,padding="""max_length""" ,max_length=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ,)
snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case : Optional[Any] = uncond_embeddings.shape[1]
snake_case : Union[str, Any] = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,1 )
snake_case : Any = uncond_embeddings.view(batch_size * num_images_per_prompt ,SCREAMING_SNAKE_CASE_ ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
snake_case : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case : Optional[int] = torch.randn(
SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device="""cpu""" ,dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
snake_case : Any = torch.randn(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device="""cpu""" ,dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
snake_case : List[Any] = torch.randn(
SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device=self.device ,dtype=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,device=self.device ,dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : str = latents_reference.to(self.device )
snake_case : Optional[int] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case : Optional[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case : List[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case : List[str] = 0 if dx < 0 else dx
snake_case : Any = 0 if dy < 0 else dy
snake_case : Dict = max(-dx ,0 )
snake_case : Optional[Any] = max(-dy ,0 )
# import pdb
# pdb.set_trace()
snake_case : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : Optional[Any] = {}
if accepts_eta:
snake_case : int = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# predict the noise residual
snake_case : str = self.unet(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case , snake_case : str = noise_pred.chunk(2 )
snake_case : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case : int = self.scheduler.step(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
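        # scale and decode the image latents with the VAE; 0.18215 is the
        # latent scaling factor used by Stable Diffusion's autoencoder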
snake_case : Optional[Any] = 1 / 0.1_82_15 * latents
snake_case : Any = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
snake_case : str = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if self.safety_checker is not None:
snake_case : Tuple = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) ,return_tensors="""pt""" ).to(
self.device )
snake_case , snake_case : Optional[Any] = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case : List[str] = None
if output_type == "pil":
snake_case : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ ,nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
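# `find_executable_batch_size` retries its wrapped function with a halved batch
# size each time the function raises a CUDA out-of-memory style error, which the
# helper above fakes on demand.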
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
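# Invocation sketch (arguments are illustrative; `microsoft/tapex-base` is a real
# TAPEX checkpoint on the Hub):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact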
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a__ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
a__ : str = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
a__ : Any = data_args.train_file.split("." )[-1]
a__ : Dict = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
a__ : Tuple = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
a__ : List[str] = load_dataset("csv" , data_files=__a , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
a__ : Tuple = load_dataset("json" , data_files=__a , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
a__ : Dict = raw_datasets["train"].features["label"].names
a__ : Optional[Any] = len(__a )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
a__ : Union[str, Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__a , )
a__ : Any = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
a__ : Optional[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
a__ : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
a__ : Dict = {"Refused": 0, "Entailed": 1}
a__ : List[Any] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
a__ : Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__a ):
# Tokenize the texts
def _convert_table_text_to_pandas(__a ):
a__ : List[str] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
a__ : List[Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
a__ : str = examples["statement"]
a__ : Tuple = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
a__ : str = tokenizer(__a , __a , padding=__a , max_length=__a , truncation=__a )
a__ : Tuple = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
a__ : str = raw_datasets.map(
__a , batched=__a , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
a__ : Optional[Any] = raw_datasets["train"]
if data_args.max_train_samples is not None:
a__ : Optional[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
a__ : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
a__ : int = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
a__ : Dict = raw_datasets["test"]
if data_args.max_predict_samples is not None:
a__ : Union[str, Any] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__a ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__a ):
a__ : Dict = p.predictions[0] if isinstance(p.predictions , __a ) else p.predictions
a__ : Optional[Any] = np.argmax(__a , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
a__ : Optional[int] = default_data_collator
elif training_args.fpaa:
a__ : int = DataCollatorWithPadding(__a , pad_to_multiple_of=8 )
else:
a__ : Any = None
# Initialize our Trainer
a__ : Optional[Any] = Trainer(
model=__a , args=__a , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__a , tokenizer=__a , data_collator=__a , )
# Training
if training_args.do_train:
a__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
a__ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a__ : Tuple = last_checkpoint
a__ : Union[str, Any] = trainer.train(resume_from_checkpoint=__a )
a__ : int = train_result.metrics
a__ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__a )
)
a__ : Optional[int] = min(__a , len(__a ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , __a )
trainer.save_metrics("train" , __a )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a__ : Any = trainer.evaluate(eval_dataset=__a )
a__ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__a )
a__ : Optional[int] = min(__a , len(__a ) )
trainer.log_metrics("eval" , __a )
trainer.save_metrics("eval" , __a )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
a__ : Optional[Any] = predict_dataset.remove_columns("label" )
a__ : List[str] = trainer.predict(__a , metric_key_prefix="predict" ).predictions
a__ : Dict = np.argmax(__a , axis=1 )
a__ : Union[str, Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(__a , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(__a ):
a__ : Union[str, Any] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
a__ : Any = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**__a )
else:
trainer.create_model_card(**__a )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
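# Illustrative trace of rename_key for a non-head key:
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#     -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"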
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
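# The tester below builds a tiny config and random inputs so that each ConvBert
# task head can be exercised quickly without real pretrained weights.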
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list)

        result = model(inputs_dict)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads // 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
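# A minimal, standalone sketch of the multiple-choice input construction used in
# create_and_check_for_multiple_choice above: each (batch, seq) tensor gets a new
# axis inserted and is tiled along it to shape (batch, num_choices, seq). The
# shapes below are illustrative values, not taken from the test configuration.
import tensorflow as tf


def _demo_multiple_choice_tiling(batch_size=2, seq_length=5, num_choices=4):
    input_ids = tf.ones((batch_size, seq_length), dtype=tf.int32)
    tiled = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
    assert tiled.shape == (batch_size, num_choices, seq_length)
    return tiled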
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
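# A quick numeric sanity check for the cross-entropy above (a sketch with made-up
# shapes, not part of the training pipeline): comparing labels against jnp.arange
# builds a one-hot matrix, and pairing it with log-softmax reproduces standard
# categorical cross-entropy.
def _demo_cross_entropy():
    logits = jnp.array([[2.0, 0.5, -1.0]])  # (batch=1, num_classes=3)
    labels = jnp.array([0])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    return loss  # roughly 0.26 for this example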
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
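# A short usage sketch (the dataset is a hypothetical stand-in for the HF
# `datasets.Dataset` the training script loads). Slicing a `datasets.Dataset`
# with `dataset[i:j]` yields a dict of columns, which is why the generator
# wraps each slice in `dict(...)`:
#
#     for batch in get_batched_dataset(tr_dataset, args.batch_size, seed=epoch):
#         batch = data_collator(batch)  # pads to max_length, then shards per device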
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
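# A small sanity check for the schedule above (illustrative numbers only): the
# learning rate ramps linearly from init_lr to lr over warmup_steps, then decays
# linearly toward 1e-7 over the remaining steps.
def _demo_schedule():
    fn = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=10, num_train_steps=100)
    return [float(fn(step)) for step in (0, 5, 10, 100)]  # 0.0, ~1.5e-5, 3e-5, ~1e-7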
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
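# Sketch of the mask logic in isolation: biases and LayerNorm scales are excluded
# from weight decay. The nested parameter tree below is a made-up example.
def _demo_weight_decay_mask():
    params = {"layer": {"kernel": 1.0, "bias": 1.0}, "LayerNorm": {"scale": 1.0}}
    flat = traverse_util.flatten_dict(params)
    mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat}
    return traverse_util.unflatten_dict(mask)  # kernel: True, bias: False, scale: False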
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
return hf_private_dataset_repo_zipped_img_data_ | 39 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
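# A quick check on a second, smaller DAG (hypothetical example): the longest path
# in 0 -> 1 -> 2, with an extra edge 0 -> 2, visits 3 vertices, so this prints 3.
small_graph = {0: [1, 2], 1: [2], 2: []}
longest_distance(small_graph)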
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
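# A minimal sketch of the incremental-decoding position ids built in the cache
# checks above (toy shapes, independent of any model): the prefix pass uses
# positions [0 .. seq_len-2] for every batch row, and the final single-token
# pass uses position seq_len-1.
def _demo_cache_position_ids(batch_size=2, seq_len=5):
    prefix = jnp.broadcast_to(jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1))
    last = jnp.array(batch_size * [[seq_len - 1]], dtype="i4")
    return prefix, last  # shapes (2, 4) and (2, 1)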
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
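# A short usage sketch for the tokenizer above (the checkpoint name comes from
# the pretrained map at the top of this file; running this downloads the
# SentencePiece model):
#
#     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tok("BigBird uses block sparse attention.")["input_ids"]
#     print(tok.decode(ids))  # round-trips the text with [CLS]/[SEP] added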
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
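# Quick illustrative check (not part of the original script): because every
# character is sampled independently from letters, digits, and punctuation,
# a generated password of length >= 8 usually, but not always, passes
# is_strong_password.
#
#     pw = password_generator(12)
#     print(pw, is_strong_password(pw))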
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
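# Worked examples (illustrative): under this indexing fibonacci(7) == 13 is the
# first two-digit term, so fibonacci_digits_index(2) == 7, and fibonacci(12) ==
# 144 gives fibonacci_digits_index(3) == 12. solution(1000) answers Project
# Euler 25: the index of the first 1000-digit Fibonacci number.
#
#     print(fibonacci_digits_index(2))  # 7
#     print(fibonacci_digits_index(3))  # 12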
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
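# The bit trick explained: a positive power of two has exactly one set bit, so
# clearing the lowest set bit with `n & (n - 1)` yields zero only for powers of
# two (and for 0, which this implementation also reports as True).
# Examples: is_power_of_two(8) -> True, is_power_of_two(6) -> False.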
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set."""
def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any:
'''simple docstring'''
lowercase__ : Any = Path(lowercase_ )
path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
lowercase__ : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
lowercase__ : Dict = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
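    # detect the available accelerator (CUDA, XPU, NPU) and choose a matching distributed type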
if torch.cuda.is_available():
lowercase__ : Any = torch.cuda.device_count()
lowercase__ : Any = num_gpus
lowercase__ : Optional[int] = False
if num_gpus > 1:
lowercase__ : Tuple = """MULTI_GPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_xpu_available() and use_xpu:
lowercase__ : Union[str, Any] = torch.xpu.device_count()
lowercase__ : str = num_xpus
lowercase__ : List[Any] = False
if num_xpus > 1:
lowercase__ : str = """MULTI_XPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_npu_available():
lowercase__ : Tuple = torch.npu.device_count()
lowercase__ : Union[str, Any] = num_npus
lowercase__ : Union[str, Any] = False
if num_npus > 1:
lowercase__ : List[Any] = """MULTI_NPU"""
else:
lowercase__ : int = """NO"""
else:
lowercase__ : Union[str, Any] = 0
lowercase__ : str = True
lowercase__ : Union[str, Any] = 1
lowercase__ : int = """NO"""
lowercase__ : Tuple = ClusterConfig(**lowercase_ )
config.to_json_file(lowercase_ )
return path
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ )
parser.add_argument(
"""--config_file""" , default=lowercase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=lowercase_ )
return parser
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 12 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A_ ( state_dict ):
    """simple docstring"""
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
_lowerCamelCase : int = emb.weight.data
return lin_layer
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = torch.load(_lowerCAmelCase , map_location="cpu" )
_lowerCamelCase : List[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
_lowerCamelCase : Optional[Any] = mam_aaa["model"]
remove_ignore_keys_(_lowerCAmelCase )
_lowerCamelCase : int = state_dict["encoder.embed_tokens.weight"].shape[0]
_lowerCamelCase : str = MaMaaaConfig(
vocab_size=_lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
_lowerCamelCase : Optional[int] = state_dict["decoder.embed_tokens.weight"]
_lowerCamelCase : List[Any] = MaMaaaForConditionalGeneration(_lowerCAmelCase )
model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
    UpperCAmelCase_ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 44 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 'convbert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = embedding_size
lowercase__ : List[str] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Dict = num_groups
lowercase__ : int = classifier_dropout
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
| 12 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Collection[float] | None = None ):
if components is None:
UpperCamelCase__ :Tuple = []
UpperCamelCase__ :Optional[Any] = list(lowerCamelCase__ )
def __len__( self :List[Any] ):
return len(self.__components )
def __str__( self :List[Any] ):
return "(" + ",".join(map(lowerCamelCase__ , self.__components ) ) + ")"
def __add__( self :List[Any] , lowerCamelCase__ :Vector ):
UpperCamelCase__ :Optional[int] = len(self )
if size == len(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = [self.__components[i] + other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self :Any , lowerCamelCase__ :Vector ):
UpperCamelCase__ :List[Any] = len(self )
if size == len(lowerCamelCase__ ):
UpperCamelCase__ :Dict = [self.__components[i] - other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self :str , lowerCamelCase__ :float ):
...
@overload
def __mul__( self :Union[str, Any] , lowerCamelCase__ :Vector ):
...
def __mul__( self :List[Any] , lowerCamelCase__ :float | Vector ):
if isinstance(lowerCamelCase__ , (float, int) ):
UpperCamelCase__ :int = [c * other for c in self.__components]
return Vector(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(self ) == len(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = len(self )
UpperCamelCase__ :List[str] = [self.__components[i] * other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return sum(lowerCamelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __a ( self :Dict ):
return Vector(self.__components )
def __a ( self :Optional[Any] , lowerCamelCase__ :int ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __a ( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :float ):
assert -len(self.__components ) <= pos < len(self.__components )
UpperCamelCase__ :List[Any] = value
def __a ( self :Optional[Any] ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
UpperCamelCase__ :List[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCamelCase__ ) )
def __a ( self :Any , lowerCamelCase__ :Vector , lowerCamelCase__ :bool = False ):
UpperCamelCase__ :Tuple = self * other
UpperCamelCase__ :Tuple = self.euclidean_length() * other.euclidean_length()
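        # dot-product identity: cos(theta) = (self * other) / (|self| * |other|)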
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def A ( dimension : int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def A ( dimension : int , pos : int ) -> Vector:
    assert isinstance(dimension , int ) and isinstance(pos , int )
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def A ( scalar : float , x : Vector , y : Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
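    # axpy: scale x by `scalar`, then add y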
return x * scalar + y
def A ( n : int , a : int , b : int ) -> Vector:
    random.seed(n )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[str] , lowerCamelCase__ :list[list[float]] , lowerCamelCase__ :int , lowerCamelCase__ :int ):
UpperCamelCase__ :List[Any] = matrix
UpperCamelCase__ :Dict = w
UpperCamelCase__ :int = h
def __str__( self :Tuple ):
UpperCamelCase__ :Tuple = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self :List[str] , lowerCamelCase__ :Matrix ):
if self.__width == other.width() and self.__height == other.height():
UpperCamelCase__ :Any = []
for i in range(self.__height ):
UpperCamelCase__ :Dict = [
self.__matrix[i][j] + other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self :List[Any] , lowerCamelCase__ :Matrix ):
if self.__width == other.width() and self.__height == other.height():
UpperCamelCase__ :List[Any] = []
for i in range(self.__height ):
UpperCamelCase__ :Optional[int] = [
self.__matrix[i][j] - other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self :Dict , lowerCamelCase__ :float ):
...
@overload
def __mul__( self :Optional[int] , lowerCamelCase__ :Vector ):
...
def __mul__( self :Optional[Any] , lowerCamelCase__ :float | Vector ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ): # matrix-vector
if len(lowerCamelCase__ ) == self.__width:
UpperCamelCase__ :List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
UpperCamelCase__ :Union[str, Any] = [
self.__matrix[i][j] * other.component(lowerCamelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCamelCase__ , sum(lowerCamelCase__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCamelCase__ , (int, float) ): # matrix-scalar
UpperCamelCase__ :str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCamelCase__ , self.__width , self.__height )
return None
def __a ( self :Dict ):
return self.__height
def __a ( self :int ):
return self.__width
def __a ( self :List[str] , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __a ( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
UpperCamelCase__ :List[str] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __a ( self :int , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
UpperCamelCase__ :Any = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCamelCase__ ) ):
UpperCamelCase__ :Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCamelCase__ , lowerCamelCase__ )
else:
raise Exception("""Indices out of bounds""" )
def __a ( self :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
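            # Laplace expansion of the determinant along the first row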
UpperCamelCase__ :str = [
self.__matrix[0][y] * self.cofactor(0 , lowerCamelCase__ ) for y in range(self.__width )
]
return sum(lowerCamelCase__ )
def A ( n : int ) -> Matrix:
    ans : list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def A ( width : int , height : int , a : int , b : int ) -> Matrix:
    random.seed(width )
    matrix : list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)
class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCAmelCase : bool = None
__lowerCAmelCase : bool = None
class _snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCAmelCase : Optional[Any] = datasets.Audio()
__lowerCAmelCase : Union[str, Any] = 'audio'
__lowerCAmelCase : str = AudioFolderConfig
__lowerCAmelCase : List[str] # definition at the bottom of the script
__lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )
lowerCamelCase__ : int = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowerCamelCase__ : int = AUDIO_EXTENSIONS
| 12 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_lowerCAmelCase : Optional[int] = '''<<<<<<< This should probably be modified because it mentions: '''
_lowerCAmelCase : str = '''=======
>>>>>>>
'''
_lowerCAmelCase : Dict = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_lowerCAmelCase : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A_ ( _a ):
@staticmethod
def _lowercase ( __lowerCAmelCase: ArgumentParser ):
'''simple docstring'''
_lowerCamelCase : List[str] = parser.add_parser(
"convert" ,help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." ,)
train_parser.add_argument(
"--tfds_path" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." ,)
train_parser.add_argument(
"--datasets_directory" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = get_logger("datasets-cli/converting" )
_lowerCamelCase : Any = tfds_path
_lowerCamelCase : Dict = datasets_directory
def _lowercase ( self: Tuple ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_lowerCamelCase : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_lowerCamelCase : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_lowerCamelCase : int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[Any] = []
_lowerCamelCase : str = {}
if os.path.isdir(self._tfds_path ):
_lowerCamelCase : Tuple = os.listdir(__lowerCAmelCase )
else:
_lowerCamelCase : str = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
_lowerCamelCase : Tuple = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__lowerCAmelCase ,encoding="utf-8" ) as f:
_lowerCamelCase : Optional[Any] = f.readlines()
_lowerCamelCase : int = []
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[str] = False
_lowerCamelCase : List[str] = []
for line in lines:
_lowerCamelCase : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_lowerCamelCase : Optional[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_lowerCamelCase : Optional[int] = ""
continue
elif "from absl import logging" in out_line:
_lowerCamelCase : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
_lowerCamelCase : Union[str, Any] = out_line.replace("getLogger" ,"get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = list(filter(lambda __lowerCAmelCase : e in out_line ,__lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + "\n" )
out_lines.append(__lowerCAmelCase )
out_lines.append(__lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
_lowerCamelCase : Dict = re.sub(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_lowerCamelCase : Any = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" ,__lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_lowerCamelCase : str = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_lowerCamelCase : Optional[int] = True
out_lines.append(__lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowerCamelCase : Union[str, Any] = f_name.replace(".py" ,"" )
_lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
_lowerCamelCase : Dict = os.path.basename(__lowerCAmelCase )
_lowerCamelCase : Any = imports_to_builder_map[f_name.replace(".py" ,"" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(__lowerCAmelCase ,__lowerCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" ) | 46 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 |
def UpperCamelCase ( nums ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
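    # mean absolute deviation: the average absolute distance of each value from the mean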
    average = sum(nums ) / len(nums ) # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
def A ( UpperCamelCase_ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_ , np.ndarray ):
return list(tensor.shape )
lowerCAmelCase__ = tf.shape(UpperCamelCase_ )
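    # dynamic shape, evaluated at run time; used for any dimension that is not statically known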
if tensor.shape == tf.TensorShape(UpperCamelCase_ ):
return dynamic
lowerCAmelCase__ = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCamelCase_ )]
def A ( UpperCamelCase_ : tf.Tensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[str] = None ) -> tf.Tensor:
'''simple docstring'''
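    # the tiny additive constant leaves the result effectively unchanged while working around an XLA compilation quirk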
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCamelCase_ , name=UpperCamelCase_ )
def A ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=1E-5 , UpperCamelCase_ : Dict=-1 ) -> int:
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
lowerCAmelCase__ ,lowerCAmelCase__ = tf.nn.moments(UpperCamelCase_ , axes=[axis] , keepdims=UpperCamelCase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowerCAmelCase__ = [1] * inputs.shape.rank
lowerCAmelCase__ = shape_list(UpperCamelCase_ )[axis]
lowerCAmelCase__ = tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
# Compute layer normalization using the batch_normalization
# function.
lowerCAmelCase__ = tf.nn.batch_normalization(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , offset=UpperCamelCase_ , scale=UpperCamelCase_ , variance_epsilon=UpperCamelCase_ , )
return outputs
def A ( UpperCamelCase_ : int , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : str=-1 ) -> Union[str, Any]:
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowerCAmelCase__ = tf.shape(UpperCamelCase_ )
lowerCAmelCase__ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowerCAmelCase__ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
def A ( UpperCamelCase_ : tf.Tensor ) -> tf.Tensor:
'''simple docstring'''
if not isinstance(UpperCamelCase_ , tf.Tensor ):
lowerCAmelCase__ = tf.convert_to_tensor(UpperCamelCase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowerCAmelCase__ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowerCAmelCase__ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowerCAmelCase__ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A ( UpperCamelCase_ : tf.Tensor , UpperCamelCase_ : int , UpperCamelCase_ : str = "input_ids" ) -> None:
'''simple docstring'''
tf.debugging.assert_less(
UpperCamelCase_ , tf.cast(UpperCamelCase_ , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCamelCase_ )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def A ( UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowerCAmelCase__ = [x for x in data if len(UpperCamelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
F"""bytes: {bad_attributes}""" )
lowerCAmelCase__ = np.asarray(UpperCamelCase_ )
lowerCAmelCase__ = 1
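    # split into progressively more chunks until every chunk fits under the HDF5 header limit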
lowerCAmelCase__ = np.array_split(UpperCamelCase_ , UpperCamelCase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowerCAmelCase__ = np.array_split(UpperCamelCase_ , UpperCamelCase_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = chunk_data
else:
lowerCAmelCase__ = data
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> Optional[Any]:
'''simple docstring'''
if name in group.attrs:
lowerCAmelCase__ = [n.decode("utf8" ) if hasattr(UpperCamelCase_ , "decode" ) else n for n in group.attrs[name]]
else:
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(UpperCamelCase_ , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def A ( UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
def _expand_single_ad_tensor(UpperCamelCase_ : Optional[Any] ):
if isinstance(UpperCamelCase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCamelCase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCamelCase_ )
| 48 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = do_rescale
lowercase__ : List[Any] = rescale_factor
lowercase__ : Tuple = do_pad
lowercase__ : Optional[Any] = pad_size
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_)
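        # grow each dimension to the next multiple of `size` (a full block is added even when already aligned)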
lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height
lowercase__ : str = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size
lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_)
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images]
if do_pad:
lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
| 12 | 0 |
"""simple docstring"""
def lowercase__ ( a :str , b :str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # matching a[i] (uppercased) against b[j] extends both prefixes
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # a lowercase a[i] may be dropped without consuming b
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase__ : Optional[int] = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[Any] = True
while ask_again:
lowercase__ : Tuple = input(lowercase_ )
try:
if default is not None and len(lowercase_ ) == 0:
return default
return convert_value(lowercase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ )
lowercase__ : Any = menu.run(default_choice=lowercase_ )
return convert_value(lowercase_ ) if convert_value is not None else result
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Union[str, Any] = int(lowercase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[str] = int(lowercase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : str = int(lowercase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _snake_case ( argparse.RawDescriptionHelpFormatter ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""")
return usage
| 12 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCamelCase : int = datasets.utils.logging.get_logger(__name__)
UpperCamelCase : List[str] = ['names', 'prefix']
UpperCamelCase : List[str] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
UpperCamelCase : Optional[int] = ['encoding_errors', 'on_bad_lines']
UpperCamelCase : Optional[int] = ['date_format']
@dataclass
class UpperCamelCase__ (datasets.BuilderConfig ):
'''simple docstring'''
_UpperCamelCase = ","
_UpperCamelCase = None
_UpperCamelCase = "infer"
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = True
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = False
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = None
_UpperCamelCase = "."
_UpperCamelCase = None
_UpperCamelCase = '"'
_UpperCamelCase = 0
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 0
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = None
_UpperCamelCase = 10000
_UpperCamelCase = None
_UpperCamelCase = "strict"
_UpperCamelCase = "error"
_UpperCamelCase = None
def UpperCamelCase_ ( self ):
if self.delimiter is not None:
lowerCamelCase__ = self.delimiter
if self.column_names is not None:
lowerCamelCase__ = self.column_names
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,_lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class UpperCamelCase__ (datasets.ArrowBasedBuilder ):
'''simple docstring'''
_UpperCamelCase = CsvConfig
def UpperCamelCase_ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCamelCase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase ,(str, list, tuple) ):
lowerCamelCase__ = data_files
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [files]
lowerCamelCase__ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
lowerCamelCase__ = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [files]
lowerCamelCase__ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase ,gen_kwargs={"""files""": files} ) )
return splits
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if self.config.features is not None:
lowerCamelCase__ = self.config.features.arrow_schema
if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
lowerCamelCase__ = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=_lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowerCamelCase__ = table_cast(_lowerCAmelCase ,_lowerCAmelCase )
return pa_table
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowerCamelCase__ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
lowerCamelCase__ = pd.read_csv(_lowerCAmelCase ,iterator=_lowerCAmelCase ,dtype=_lowerCAmelCase ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = pa.Table.from_pandas(_lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_lowerCAmelCase )}: {e}''' )
raise
| 50 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 | 0 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
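# scrape the IMDb Top 250 chart: titles from 'titleColumn' cells, ratings from 'ratingColumn imdbRating' cells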
def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "" ) -> dict[str, float]:
"""simple docstring"""
UpperCAmelCase = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
UpperCAmelCase = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' )
UpperCAmelCase = soup.find_all('''td''' , attrs='''titleColumn''' )
UpperCAmelCase = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
}
def __snake_case ( SCREAMING_SNAKE_CASE_ : str = "IMDb_Top_250_Movies.csv" ) -> None:
"""simple docstring"""
UpperCAmelCase = get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE_ , '''w''' , newline='''''' ) as out_file:
UpperCAmelCase = csv.writer(SCREAMING_SNAKE_CASE_ )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 51 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 12 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
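# Worked example: dilating a single centre pixel with a cross-shaped structuring
# element spreads it into a cross:
#   dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
#            np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   -> array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])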
if __name__ == "__main__":
# read original image
A = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
A = np.array(Image.open(lena_path))
# kernel to be applied
A = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
A = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
A = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''') | 52 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 12 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '[PAD]'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {'input_ids': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name='microsoft/xprophetnet-large-wiki100-cased',
            revision='1acad1643ddd54a44df6a1b797ada8373685d90e',
        )
| 53 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
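# Usage sketch (assumes an OWL-ViT checkpoint, the usual backbone for this pipeline;
# the image path is a placeholder):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("cats.jpg", candidate_labels=["cat", "remote control"], threshold=0.1)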
| 12 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
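# Usage sketch ("openai/clip-vit-base-patch32" is the canonical CLIP checkpoint; the
# image path is a placeholder):
# from PIL import Image
# processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
# batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224]) with the default config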
| 54 |
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
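# Note: mf_knapsack depends on a module-level memo table `f`, pre-filled with -1 for
# unvisited states (see the __main__ block below for the expected initialisation).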
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
lowerCamelCase__ : Dict = [3, 2, 4, 4]
lowerCamelCase__ : List[Any] = [4, 3, 2, 3]
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Dict = 6
lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 12 | 0 |
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
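# Example: arr = [9, 3, 7, 1]; quick_sort_random(arr, 0, len(arr)) sorts in place -> [1, 3, 7, 9]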
| 55 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
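# Example invocation (script and checkpoint file names below are placeholders):
# python convert_flava_checkpoint.py --checkpoint_path flava_full.pt \
#     --codebook_path flava_codebook.pt --pytorch_dump_folder_path ./flava-hf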
| 12 | 0 |
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # neutralise the used ratio so .index() finds the next occurrence later on
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
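# Worked example: calc_profit([60, 100, 120], [10, 20, 30], 50) takes items 1 and 2
# whole (weight 30, profit 160) and 20/30 of item 3, returning 160 + 80 = 240.0.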
| 56 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 12 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A_ : Any = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
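# Example of the mask derivation above: with pad_token_id = 1 and
# input_ids = [[5, 7, 1]], np.where yields attention_mask = [[1, 1, 0]],
# that is, ones over real tokens and zeros over padding.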
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
UpperCamelCase_: Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCamelCase_: Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCamelCase_: Dict = shift_tokens_right(_lowerCamelCase , 1 , 2 )
UpperCamelCase_: Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , )
UpperCamelCase_: Union[str, Any] = prepare_blenderbot_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
UpperCamelCase_ ,UpperCamelCase_: List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
UpperCamelCase_: List[str] = 2_0
UpperCamelCase_: Tuple = model_class_name(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = model.encode(inputs_dict['input_ids'] )
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCamelCase_: Any = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCamelCase_: int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase_: Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
UpperCamelCase_: Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase_: Any = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCamelCase , )
UpperCamelCase_: List[Any] = model.decode(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
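    # The check above exercises incremental decoding with a key/value cache:
    # decoding the prefix via init_cache and then the final token alone must
    # match the last-position logits of a single full decode to within 1e-3.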
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
UpperCamelCase_: Tuple = 2_0
UpperCamelCase_: List[str] = model_class_name(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = model.encode(inputs_dict['input_ids'] )
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCamelCase_: Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCamelCase_: Dict = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase_: int = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
UpperCamelCase_: Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase_: Any = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
UpperCamelCase_: Optional[int] = model.decode(_lowerCamelCase , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase )
UpperCamelCase_: List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def _a ( self ):
UpperCamelCase_: Union[str, Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
UpperCamelCase_: str = input_ids.shape[0]
UpperCamelCase_: str = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: str = self._get_config_and_data()
UpperCamelCase_: Optional[Any] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase_: Any = lm_model(input_ids=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
UpperCamelCase_: Optional[int] = FlaxBlenderbotForConditionalGeneration(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
UpperCamelCase_: Tuple = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
UpperCamelCase_: Tuple = lm_model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
UpperCamelCase_: Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
UpperCamelCase_: Tuple = shift_tokens_right(_lowerCamelCase , 1 , 2 )
UpperCamelCase_: Union[str, Any] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
UpperCamelCase_: Optional[int] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_: Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
return model.encode(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase )
with self.subTest('JIT Enabled' ):
UpperCamelCase_: Optional[int] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_: Dict = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_: Tuple = model_class(_lowerCamelCase )
UpperCamelCase_: Dict = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCamelCase_: List[str] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return model.decode(
decoder_input_ids=_lowerCamelCase , decoder_attention_mask=_lowerCamelCase , encoder_outputs=_lowerCamelCase , )
with self.subTest('JIT Enabled' ):
UpperCamelCase_: List[Any] = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase_: Dict = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase_: Dict = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCamelCase_: Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCamelCase_: List[Any] = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def _a ( self ):
UpperCamelCase_: Optional[int] = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
UpperCamelCase_: List[Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
UpperCamelCase_: Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowerCamelCase )
UpperCamelCase_: Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
UpperCamelCase_: int = ['Sam']
UpperCamelCase_: str = tokenizer(_lowerCamelCase , return_tensors='jax' )
UpperCamelCase_: Union[str, Any] = model.generate(**_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: Any = 'Sam is a great name. It means "sun" in Gaelic.'
UpperCamelCase_: Any = tokenizer.batch_decode(_lowerCamelCase , **_lowerCamelCase )
assert generated_txt[0].strip() == tgt_text | 57 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
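    # Sanity check (added; Project Euler 191's worked example): a 4-day
    # period admits exactly 43 prize strings.
    assert solution(4) == 43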
| 12 | 0 |
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 58 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
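# For context, a minimal sketch (added; not accelerate's actual implementation)
# of how a decorator like `find_executable_batch_size` can work: call the
# wrapped function and halve the batch size whenever an out-of-memory error is
# raised, until something fits or zero is reached.
def _find_executable_batch_size_sketch(starting_batch_size=128):
    import functools

    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" in str(err).lower():
                        batch_size //= 2  # halve and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return wrapper

    return decorator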
| 12 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__A = logging.get_logger(__name__)
__A = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"
def __init__(self : Optional[int] , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Any=1E-1_2 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=[2, 5, 8, 11] , UpperCAmelCase_ : List[str]="project" , UpperCAmelCase_ : Dict=[4, 2, 1, 0.5] , UpperCAmelCase_ : Dict=[96, 192, 384, 768] , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : List[Any]=-1 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=0.4 , UpperCAmelCase_ : str=255 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=[1, 1_024, 24, 24] , UpperCAmelCase_ : Dict=[0, 1] , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Optional[Any] , ) ->Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =hidden_size
lowerCamelCase__: List[str] =is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone.")
lowerCamelCase__: str ={
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
lowerCamelCase__: Union[str, Any] =BitConfig(**UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
logger.info("Initializing the config with a `BiT` backbone.")
lowerCamelCase__: Optional[Any] =BitConfig(**UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""")
lowerCamelCase__: Optional[int] =backbone_featmap_shape
lowerCamelCase__: Optional[Any] =neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
else:
lowerCamelCase__: List[Any] =None
lowerCamelCase__: Optional[int] =None
lowerCamelCase__: Optional[Any] =[]
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: List[str] =num_attention_heads
lowerCamelCase__: Any =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: Optional[int] =hidden_dropout_prob
lowerCamelCase__: Dict =attention_probs_dropout_prob
lowerCamelCase__: Any =initializer_range
lowerCamelCase__: Optional[int] =layer_norm_eps
lowerCamelCase__: Dict =image_size
lowerCamelCase__: str =patch_size
lowerCamelCase__: Any =num_channels
lowerCamelCase__: Dict =qkv_bias
lowerCamelCase__: Dict =backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
lowerCamelCase__: int =readout_type
lowerCamelCase__: str =reassemble_factors
lowerCamelCase__: int =neck_hidden_sizes
lowerCamelCase__: Dict =fusion_hidden_size
lowerCamelCase__: List[str] =head_in_index
lowerCamelCase__: Dict =use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCamelCase__: Dict =use_auxiliary_head
lowerCamelCase__: List[str] =auxiliary_loss_weight
lowerCamelCase__: Union[str, Any] =semantic_loss_ignore_index
lowerCamelCase__: int =semantic_classifier_dropout
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[str] =copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
lowerCamelCase__: List[Any] =self.backbone_config.to_dict()
lowerCamelCase__: Union[str, Any] =self.__class__.model_type
return output
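# Usage sketch (illustrative; the public name `DPTConfig` is assumed from the
# model_type above):
# config = DPTConfig(is_hybrid=True)  # builds a default BiT backbone config
# config_dict = config.to_dict()      # serializes backbone_config as a dict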
| 59 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
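# Worked example of the renaming above (hypothetical checkpoint key, non-head
# weight): "layers.0.residual_group.blocks.1.attn.proj.weight" becomes
# "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight" through the
# "layers" -> "encoder.stages", "residual_group.blocks" -> "layers" and
# "attn.proj" -> "attention.output.dense" substitutions, plus the final
# "swin2sr." prefix.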
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
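# Shape logic of the qkv split above (illustrative): a fused "qkv" weight of
# shape (3 * dim, dim) is sliced row-wise into query = val[:dim], key =
# val[dim : dim * 2] and value = val[-dim:]; fused (3 * dim,) biases are
# sliced the same way.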
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
lowerCamelCase__ : Any = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 12 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __magic_name__ , )
snake_case_ : Optional[Any] = kwargs.pop('''feature_extractor''' )
snake_case_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self.image_processor
def __call__(self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case_ : Optional[int] = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
snake_case_ : Any = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __magic_name__ , )
return self.image_processor_class
@property
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __magic_name__ , )
return self.image_processor
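# Usage sketch (illustrative; local variables are hypothetical):
# processor = FlavaProcessor(image_processor=FlavaImageProcessor(), tokenizer=tokenizer)
# batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
# `batch` then carries both the tokenizer outputs and the pixel values.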
| 60 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
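# Quick numeric check of the one-hot cross-entropy above (illustrative): for
# logits [[0., 0.]] and label [0], softmax is uniform over two classes, so the
# per-example loss is log(2) ~= 0.693. The returned value averages the start,
# end and 5-way pooled category terms.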
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class _snake_case :
__lowerCAmelCase : int
__lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""])
lowercase__ : str = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))]
while len(SCREAMING_SNAKE_CASE_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
if seed is not None:
lowercase__ : Any = dataset.shuffle(seed=lowercase_ )
for i in range(len(lowercase_ ) // batch_size ):
lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase_ )
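# Note on the batching above (illustrative): a dataset of 10 examples with
# batch_size 4 yields two batches of 4; the trailing 2 examples are dropped
# because the loop runs len(dataset) // batch_size times.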
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int:
'''simple docstring'''
def loss_fn(lowercase_ ):
lowercase__ : Dict = model_inputs.pop("""start_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""end_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Any = outputs
return state.loss_fn(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ )
lowercase__ : Tuple = jax.value_and_grad(lowercase_ )
lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params )
lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" )
lowercase__ : str = state.apply_gradients(grads=lowercase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Tuple = model_inputs.pop("""start_labels""" )
lowercase__ : List[str] = model_inputs.pop("""end_labels""" )
lowercase__ : int = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs
lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : List[str] = model.params
lowercase__ : Dict = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
if ckpt_dir is not None:
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = train_state.TrainState(
step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[Any] = args
lowercase__ : Union[str, Any] = data_collator
lowercase__ : str = lr
lowercase__ : Union[str, Any] = params
lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_)
return state
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = self.args
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size
lowercase__ : int = jax.random.PRNGKey(0)
lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count())
for epoch in range(args.max_epochs):
lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
lowercase__ : List[str] = jax_utils.unreplicate(state.step)
lowercase__ : str = running_loss.item() / i
lowercase__ : Tuple = self.scheduler_fn(state_step - 1)
lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_))
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size)
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size
lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params)
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib"""))
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_)
print("""DONE""")
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f:
lowercase__ : Optional[Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f:
lowercase__ : Dict = from_bytes(state.opt_state , f.read() )
lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) )
lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) )
with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f:
lowercase__ : int = json.load(lowercase_ )
lowercase__ : Optional[Any] = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : Optional[int] = num_train_steps - warmup_steps
lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ )
lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ )
lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
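# Minimal pure-Python sketch (added; illustrative) of the warmup-then-decay
# schedule assembled above; the real code relies on optax.linear_schedule and
# optax.join_schedules. The helper name is hypothetical.
def _lr_schedule_sketch(step, init_lr, lr, warmup_steps, num_train_steps):
    if step < warmup_steps:
        # linear warmup from init_lr to lr
        return init_lr + (lr - init_lr) * step / warmup_steps
    # linear decay from lr towards ~0 (1e-7) over the remaining steps
    decay_steps = num_train_steps - warmup_steps
    frac = min(step - warmup_steps, decay_steps) / decay_steps
    return lr + (1e-7 - lr) * frac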
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
def weight_decay_mask(lowercase_ ):
lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ )
lowercase__ : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowercase_ )
lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ )
return tx, lr
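# Note on weight_decay_mask above: optax.adamw applies weight decay only where
# the mask is True, so biases and LayerNorm scale parameters are exempted, a
# common convention when training transformers.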
| 12 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def a ( self : Optional[Any] ) -> List[str]:
super().setUp()
lowerCAmelCase__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase__ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
lowerCAmelCase__ = "UNwant\u00E9d,running"
lowerCAmelCase__ = "unwanted, running"
return input_text, output_text
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
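# --- Hedged usage sketch (not part of the original test file): the greedy
# longest-match-first behaviour asserted in test_wordpiece_tokenizer, run directly.
if __name__ == "__main__":
    vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
    wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    print(wordpiece.tokenize("unwanted running"))  # ['un', '##want', '##ed', 'runn', '##ing']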
| 61 |
lowerCamelCase__ : List[str] = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 12 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """One parameterized test case per dataset (optionally per config)."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
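# --- Hedged sanity check (not part of the original test file): the class decorator
# above expands into one named test case per (dataset, config) pair.
if __name__ == "__main__":
    names = [p["testcase_name"] for p in list_datasets_on_hf_gcp_parameters(with_config=True)]
    assert "wikipedia/20220301.de" in names
    print(f"{len(names)} parameterized cases")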
| 62 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
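# --- Hedged note (not part of the original tests): the two cache checks above assert
# that token-by-token decoding with init_cache matches a single full forward pass to
# within 1e-3 on the last five logits of the final position, with and without padding
# in the attention mask.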
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
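# --- Hedged usage note (illustrative): with sys.modules patched above,
#   from transformers.models.xglm import XGLMConfig
# resolves lazily through _import_structure; the torch/flax/tf submodules are only
# imported when their symbols are first accessed.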
| 63 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
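# --- Hedged usage sketch (the checkpoint name and image variable are illustrative
# assumptions, not from the original source):
#   processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> dict with input_ids, attention_mask and pixel_values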
| 12 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
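# --- Hedged usage sketch (the file path is an illustrative assumption): this builder
# backs load_dataset("csv", ...); unknown keyword arguments flow into CsvConfig and
# from there into pandas.read_csv via pd_read_csv_kwargs.
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)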
| 64 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (with fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
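# --- Hedged sanity check (not in the original solution): F(12) = 144 is the first
# Fibonacci term with three digits, so:
#   >>> solution(3)
#   12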
| 12 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
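# --- Hedged usage sketch (the image variable is an illustrative assumption): the
# processor resizes/thumbnails/pads a PIL image into pixel_values ready for Donut.
#   from transformers import DonutImageProcessor
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values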
| 65 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a minimal Accelerate cluster config to `save_location`."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
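# --- Hedged usage sketch (not part of the original CLI module): write a default
# config into a throwaway directory and show where it landed.
if __name__ == "__main__":
    import tempfile

    out = write_basic_config("no", Path(tempfile.mkdtemp()) / "default_config.json")
    print(out)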
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
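# --- Hedged usage sketch (the hyperparameter values are illustrative assumptions):
# instantiating a small ConvBERT config.
if __name__ == "__main__":
    config = ConvBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(config.model_type, config.hidden_size, config.conv_kernel_size)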
| 12 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(F"""Generating {path}""")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name) | 67 |
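# A self-contained sketch of the same "template plus per-model score dict"
# idea used by the script above, written to a temporary directory so it runs
# anywhere. `render_card` and the toy score table are illustrative only.
import tempfile
from pathlib import Path as _Path


def render_card(model_name: str, scores: dict) -> str:
    fairseq_bleu, transformers_bleu = scores[model_name]
    return f"# {model_name}\n\nfairseq BLEU: {fairseq_bleu}\ntransformers BLEU: {transformers_bleu}\n"


toy_scores = {"wmt16-en-de-12-1": [26.9, 25.75]}
with tempfile.TemporaryDirectory() as tmp:
    card_path = _Path(tmp) / "README.md"
    card_path.write_text(render_card("wmt16-en-de-12-1", toy_scores), encoding="utf-8")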
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
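# Hedged usage sketch: with this builder shipped in `datasets`, a directory of
# audio files laid out one subfolder per class loads by the "audiofolder" name.
# The data_dir below is a placeholder and must point at real files on disk,
# which is why the example is left commented out.
#
# from datasets import load_dataset
#
# ds = load_dataset("audiofolder", data_dir="path/to/audio_root")
# print(ds["train"].features)  # an Audio feature plus a ClassLabel "label"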
| 12 | 0 |
from __future__ import annotations
def is_9_pandigital(num: int) -> bool:
    """simple docstring"""
    digits = str(num)
    return len(digits) == 9 and set(digits) == set("""123456789""")
def solution() -> int | None:
    """simple docstring"""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
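# A minimal sketch of driving DDPMScheduler outside the test harness. The
# random tensors stand in for a real UNet's noise prediction; shapes and
# step counts here are arbitrary.
import torch
from diffusers import DDPMScheduler


def run_toy_denoising_loop() -> torch.Tensor:
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # would come from a denoising model
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample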
| 12 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
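# A Monte Carlo cross-check of the exact count above (sketch only; with enough
# trials the estimate approaches the exact value returned by solution()).
import random


def estimate_peter_win_probability(trials: int = 100_000) -> float:
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / trials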
| 69 |
def average_absolute_deviation(nums: list) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
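# Cross-check against NumPy (assumes numpy is installed): the same statistic
# is mean(|x - mean(x)|). For [1, 2, 3, 4] the mean is 2.5 and the average
# absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
import numpy as np

_sample = [1.0, 2.0, 3.0, 4.0]
assert average_absolute_deviation(_sample) == 1.0
assert np.isclose(np.mean(np.abs(np.array(_sample) - np.mean(_sample))), 1.0)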
| 12 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42 # [batch_size x 3]
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self : List[Any] ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ = torch.arange(self.height * self.width )
lowerCamelCase_ = torch.stack(
[
pixel_indices % self.width,
torch.div(A_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , *lowerCamelCase_ = self.shape
lowerCamelCase_ = int(np.prod(A_ ) )
lowerCamelCase_ = self.get_image_coords()
lowerCamelCase_ = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
lowerCamelCase_ = self.get_camera_rays(A_ )
lowerCamelCase_ = rays.view(A_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self : int , A_ : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_ = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
lowerCamelCase_ = coords.view(A_ , -1 , 2 )
lowerCamelCase_ = self.resolution()
lowerCamelCase_ = self.fov()
lowerCamelCase_ = (flat.float() / (res - 1)) * 2 - 1
lowerCamelCase_ = fracs * torch.tan(fov / 2 )
lowerCamelCase_ = fracs.view(A_ , -1 , 2 )
lowerCamelCase_ = (
self.z.view(A_ , 1 , 3 )
+ self.x.view(A_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(A_ , 1 , 3 ) * fracs[:, :, 1:]
)
lowerCamelCase_ = directions / directions.norm(dim=-1 , keepdim=A_ )
lowerCamelCase_ = torch.stack(
[
torch.broadcast_to(self.origin.view(A_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(A_ , *A_ , 2 , 3 )
def a__ ( self : Any , A_ : int , A_ : int ) -> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=A_ , height=A_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
lowerCamelCase_ = np.array([np.sin(lowercase ), np.cos(lowercase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
lowerCamelCase_ = -z * 4
lowerCamelCase_ = np.array([np.cos(lowercase ), -np.sin(lowercase ), 0.0] )
lowerCamelCase_ = np.cross(lowercase , lowercase )
origins.append(lowercase )
xs.append(lowercase )
ys.append(lowercase )
zs.append(lowercase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase , axis=0 ) ).float() , width=lowercase , height=lowercase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase )) , )
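# The ray construction above, reduced to NumPy for a single pixel (a sketch;
# names are illustrative). For normalized pixel coords (fx, fy) in [-1, 1],
# the direction is z + fx * tan(x_fov / 2) * x + fy * tan(y_fov / 2) * y,
# normalized to unit length, exactly as in get_camera_rays.
import numpy as np


def pixel_ray(origin, x, y, z, fx, fy, x_fov=0.7, y_fov=0.7):
    direction = z + fx * np.tan(x_fov / 2) * x + fy * np.tan(y_fov / 2) * y
    return origin, direction / np.linalg.norm(direction)


_o, _d = pixel_ray(np.zeros(3), np.eye(3)[0], np.eye(3)[1], np.eye(3)[2], 0.5, -0.5)
assert np.isclose(np.linalg.norm(_d), 1.0)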
| 70 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = do_rescale
lowercase__ : List[Any] = rescale_factor
lowercase__ : Tuple = do_pad
lowercase__ : Optional[Any] = pad_size
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height
lowercase__ : str = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size
lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_)
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images]
if do_pad:
lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
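# The symmetric-padding arithmetic from pad() in isolation: each spatial dim
# is padded up to the next multiple of `size`. Note the formula always adds a
# full block, even when the dim is already a multiple, mirroring the code
# above (a sketch with a hypothetical helper name).
def pad_amounts(old_height: int, old_width: int, size: int = 8):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width


assert pad_amounts(13, 21) == (3, 3)
assert pad_amounts(16, 16) == (8, 8)  # already a multiple still gets a block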
| 12 | 0 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
_lowerCamelCase = """src/transformers"""
# Matches is_xxx_available()
_lowerCamelCase = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_lowerCamelCase = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowerCamelCase = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_lowerCamelCase = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_lowerCamelCase = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowerCamelCase = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_lowerCamelCase = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowerCamelCase = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_lowerCamelCase = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_lowerCamelCase = re.compile(R"""^\s*try:""")
# Catches a line with else:
_lowerCamelCase = re.compile(R"""^\s*else:""")
def a__ ( _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None:
return None
UpperCAmelCase_ : Union[str, Any] = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ : Tuple = f.readlines()
UpperCAmelCase_ : int = 0
while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ : str = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0]
UpperCAmelCase_ : List[Any] = re.findall(r"\[([^\]]+)\]" , _SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
UpperCAmelCase_ : Optional[Any] = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
UpperCAmelCase_ : Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ : Tuple = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
UpperCAmelCase_ : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None:
UpperCAmelCase_ : Dict = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(", " )
UpperCAmelCase_ : Union[str, Any] = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None:
UpperCAmelCase_ : str = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(", " )
UpperCAmelCase_ : Union[str, Any] = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ : Union[str, Any] = []
while (
line_index < len(_SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
UpperCAmelCase_ : Optional[Any] = lines[line_index]
UpperCAmelCase_ : List[Any] = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ : str = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
UpperCAmelCase_ : Any = lines[line_index]
UpperCAmelCase_ : List[str] = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
"""simple docstring"""
def find_duplicates(_SCREAMING_SNAKE_CASE : Any ):
return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ : Union[str, Any] = []
for key in import_dict_objects.keys():
UpperCAmelCase_ : List[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
UpperCAmelCase_ : Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ : Optional[Any] = "base imports" if key == "none" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def a__ ( ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
UpperCAmelCase_ : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , "__init__.py" )
UpperCAmelCase_ : Dict = parse_init(_SCREAMING_SNAKE_CASE )
if objects is not None:
UpperCAmelCase_ : str = analyze_results(*_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase_ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError("\n\n".join(_SCREAMING_SNAKE_CASE ) )
def a__ ( ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
for path, directories, files in os.walk(_SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(_SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_SCREAMING_SNAKE_CASE ) / folder).glob("*.py" ) ) ) == 0:
continue
UpperCAmelCase_ : Dict = str((Path(_SCREAMING_SNAKE_CASE ) / folder).relative_to(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = short_path.replace(os.path.sep , "." )
submodules.append(_SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ : List[Any] = str((Path(_SCREAMING_SNAKE_CASE ) / fname).relative_to(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Union[str, Any] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(_SCREAMING_SNAKE_CASE )
return submodules
_lowerCamelCase = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def a__ ( ) -> str:
"""simple docstring"""
from transformers.utils import direct_transformers_import
UpperCAmelCase_ : Optional[int] = direct_transformers_import(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
with open(os.path.join(_SCREAMING_SNAKE_CASE , "__init__.py" ) , "r" ) as f:
UpperCAmelCase_ : Dict = f.read()
import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , _SCREAMING_SNAKE_CASE ) ) )
UpperCAmelCase_ : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase_ : str = "\n".join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
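# The duplicate check used by analyze_results, in isolation: Counter makes
# "listed twice in the init" detection a one-liner (illustrative object names).
import collections


def find_duplicates(seq):
    return [k for k, v in collections.Counter(seq).items() if v > 1]


assert find_duplicates(["BertModel", "BertModel", "BertConfig"]) == ["BertModel"]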
| 71 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase__ : Optional[int] = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[Any] = True
while ask_again:
lowercase__ : Tuple = input(lowercase_ )
try:
if default is not None and len(lowercase_ ) == 0:
return default
return convert_value(lowercase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ )
lowercase__ : Any = menu.run(default_choice=lowercase_ )
return convert_value(lowercase_ ) if convert_value is not None else result
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Union[str, Any] = int(lowercase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[str] = int(lowercase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : str = int(lowercase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _snake_case ( argparse.RawDescriptionHelpFormatter ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""")
return usage
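# A self-contained sketch of the ask/convert/retry loop implemented above
# (hypothetical names; input() is replaced by an injected reader so the
# example runs non-interactively).
def ask_field(prompt, convert_value=None, default=None, error_message=None, reader=input):
    while True:
        result = reader(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


_answers = iter(["maybe", "yes"])
_to_bool = lambda value: {"yes": True, "no": False}[value.lower()]
assert ask_field("fp16? ", convert_value=_to_bool, error_message="Please answer yes or no.", reader=lambda _: next(_answers)) is True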
| 12 | 0 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
_UpperCAmelCase : Dict = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Dict ) -> Tuple:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCamelCase ( lowercase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Optional[int] ) -> str:
'''simple docstring'''
lowercase =tmp_path_factory.getbasetemp() / '''cache'''
lowercase =test_hf_cache_home / '''datasets'''
lowercase =test_hf_cache_home / '''metrics'''
lowercase =test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
lowercase =test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
lowercase =test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def UpperCamelCase ( lowercase_ : List[Any] ) -> Any:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def UpperCamelCase ( lowercase_ : str ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
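# A minimal conftest-style fixture of the same shape as the ones above
# (a sketch): redirect a library's cache location into pytest's temp area
# for every test via monkeypatch + tmp_path_factory.
import pytest as _pytest


@_pytest.fixture(autouse=True)
def isolated_cache(monkeypatch, tmp_path_factory):
    cache = tmp_path_factory.getbasetemp() / 'cache'
    monkeypatch.setenv('HF_DATASETS_CACHE', str(cache / 'datasets'))
    yield cache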
| 72 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
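# The optional-dependency guard above, reduced to a stdlib-only sketch
# (FooConfig and FooModel are illustrative names): torch-only symbols are
# registered in the import structure only when the import succeeds.
_import_structure_sketch = {"configuration_foo": ["FooConfig"]}
try:
    import torch  # noqa: F401
except ImportError:
    pass
else:
    _import_structure_sketch["modeling_foo"] = ["FooModel"]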
| 12 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = torch.permute(_UpperCAmelCase , (0, 2, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase):
# linear layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if "metadata" in layer:
SCREAMING_SNAKE_CASE = layer.split('metadata')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('metadata' + split_layer[1]).split('/'))]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE = layer.split('kvstore')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('kvstore' + split_layer[1]).split('/'))]
else:
SCREAMING_SNAKE_CASE = layer.split('/')
SCREAMING_SNAKE_CASE = '/'.join(split_layer[:-1])
SCREAMING_SNAKE_CASE = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE = 'file'
else:
SCREAMING_SNAKE_CASE = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = rename_keys(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE = v
SCREAMING_SNAKE_CASE = new_current_block
torch.save(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = WEIGHTS_NAME):
SCREAMING_SNAKE_CASE = convert_file_size_to_int(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb') as fp:
SCREAMING_SNAKE_CASE = serialization.msgpack_restore(fp.read())['optimizer']['target']
SCREAMING_SNAKE_CASE = flatten_dict(_UpperCAmelCase , sep='/')
SCREAMING_SNAKE_CASE = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_key_and_tensorstore_dict(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE = content
else:
SCREAMING_SNAKE_CASE = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE = ts.open(unflatten_dict(all_layers[key])).result().read().result()
SCREAMING_SNAKE_CASE = torch.tensor(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = rename_base_flax_keys(tuple(key.split('/')) , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = '/'.join(_UpperCAmelCase)
        # If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin'''))
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase)
sharded_state_dicts.append(current_block.keys())
del current_block
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = raw_weights.to(getattr(_UpperCAmelCase , _UpperCAmelCase))
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{len(_UpperCAmelCase)+1:05d}-of-???.bin'''))
rename_and_save_block(_UpperCAmelCase , _UpperCAmelCase)
sharded_state_dicts.append(current_block.keys())
# If we only have one shard, we return it
if len(_UpperCAmelCase) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for idx, shard in enumerate(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = weights_name.replace(
            '.bin' , F'''-{idx+1:05d}-of-{len(_UpperCAmelCase):05d}.bin''')
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin'''))
os.rename(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE = shard
for key in shard:
SCREAMING_SNAKE_CASE = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE = {'total_size': total_size}
SCREAMING_SNAKE_CASE = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase) , 'w' , encoding='utf-8') as f:
SCREAMING_SNAKE_CASE = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase) + '\n'
f.write(_UpperCAmelCase)
return metadata, index
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
a_ : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCamelCase__ ():
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto')
SCREAMING_SNAKE_CASE = TaTokenizer.from_pretrained('t5-small')
SCREAMING_SNAKE_CASE = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase , return_tensors='pt').input_ids
SCREAMING_SNAKE_CASE = model.generate(_UpperCAmelCase , decoder_start_token_id=0)
print(tokenizer.decode(out[0]))
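# The size-based sharding loop distilled into a few lines (a sketch with toy
# byte counts; the real script computes sizes with dtype_byte_size and parses
# the limit with convert_file_size_to_int).
def shard_by_size(weights: dict, max_shard_size: int):
    shards, current, current_size = [], {}, 0
    for name, size in weights.items():
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = size
        current_size += size
    if current:
        shards.append(current)
    return shards


assert len(shard_by_size({'a': 6, 'b': 6, 'c': 3}, max_shard_size=10)) == 2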
| 73 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _snake_case ( UpperCAmelCase_ ):
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = []
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_init_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_evaluate""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_predict""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_save""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_log""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
# Order doesn't matter
lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_)
else:
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = ["""on_init_end""", """on_train_begin"""]
lowercase__ : Union[str, Any] = 0
lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader())
lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("""on_epoch_begin""")
for _ in range(SCREAMING_SNAKE_CASE_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = self.get_trainer()
lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# Callbacks passed at init are added to the default callbacks
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : Tuple = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# We can also add, pop, or remove by instance
lowercase__ : Union[str, Any] = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# Independent log/save/eval
lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""")
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""")
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""") as warn_mock:
lowercase__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
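# Hedged usage sketch: the event-recording pattern tested above is also how
# real callbacks are written — subclass TrainerCallback, override the hooks
# you need, and pass an instance to Trainer. `LossPrinterCallback` is an
# illustrative name, not part of the test suite.
from transformers import TrainerCallback


class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` carries whatever the Trainer just logged (loss, lr, ...)
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")


# trainer = Trainer(..., callbacks=[LossPrinterCallback()])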
| 12 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
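# A self-contained illustration of the chained-URL convention that
# test_fs_isfile exercises: fsspec resolves "zip://<member>::<archive>" by
# stacking filesystems. File and member names here are illustrative only.
import tempfile
import zipfile


def _demo_chained_url():
    tmp_dir = tempfile.mkdtemp()
    archive = os.path.join(tmp_dir, "archive.zip")
    with zipfile.ZipFile(archive, "w") as zf:
        zf.writestr("dataset.jsonl", '{"text": "hello"}\n')
    with fsspec.open(f"zip://dataset.jsonl::{archive}", "rt") as f:
        print(f.read())  # -> {"text": "hello"}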
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[int] = tokenizer.tokenize("""你好[SEP]你是谁""")
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_) , [5, 6, 2, 5, 7, 8])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowercase__ : Optional[int] = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = i
lowercase__ : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def lowercase__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
if self.test_rust_tokenizer:
lowercase__ : int = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def lowercase__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ : List[str] = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , """do_lower_case""") else False
lowercase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = ["""的""", """人""", """有"""]
lowercase__ : List[str] = """""".join(SCREAMING_SNAKE_CASE_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : str = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = False
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_)
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ : Any = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_)
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowercase__ : Optional[Any] = tokenizer.encode("""你好""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
lowercase__ : Optional[int] = """你好,你是谁"""
lowercase__ : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
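# A quick end-to-end sketch of the three parallel id streams these tests
# cover; the checkpoint name is the public RoCBert one and is an assumption
# here, not something this test file pins down.
def _demo_roc_bert_tokenizer():
    tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
    tokens = tokenizer.tokenize("你好,你是谁")
    print(tokenizer.convert_tokens_to_ids(tokens))
    print(tokenizer.convert_tokens_to_shape_ids(tokens))
    print(tokenizer.convert_tokens_to_pronunciation_ids(tokens))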
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCamelCase__ = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
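# Typical invocations, run from the repo root (matching the comment at the top
# of this script):
#   python utils/check_table.py                      # fail if the table is stale
#   python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md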
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
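# A minimal usage sketch for the pipeline above; the OWL-ViT checkpoint name
# is the commonly used public one and is an assumption here.
def _demo_zero_shot_detection():
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    print(predictions[0])  # e.g. {'score': ..., 'label': 'cat', 'box': {'xmin': ..., ...}}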
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None:
super().__init__(**UpperCamelCase_ )
__lowercase : List[str] = size if size is not None else {'''shortest_edge''': 2_56}
__lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowercase : Dict = get_size_dict(UpperCamelCase_ )
__lowercase : Dict = do_resize
__lowercase : Optional[Any] = size
__lowercase : List[Any] = resample
__lowercase : Dict = do_center_crop
__lowercase : Any = crop_size
__lowercase : List[str] = do_rescale
__lowercase : List[str] = rescale_factor
__lowercase : Optional[Any] = do_normalize
__lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : List[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ) -> np.ndarray:
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> Optional[Any]:
__lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Tuple = size if size is not None else self.size
__lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : int = resample if resample is not None else self.resample
__lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(UpperCamelCase_ )
__lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
__lowercase : Any = image_std if image_std is not None else self.image_std
__lowercase : Any = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__lowercase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
__lowercase : Any = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
__lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__lowercase : Optional[int] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__lowercase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowercase : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
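# The preprocess pipeline above reduces to this functional chain; a
# self-contained sketch with random pixels (sizes illustrative).
def _demo_functional_chain():
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    image = resize(image, size=(256, 341))  # shortest edge -> 256
    image = center_crop(image, size=(224, 224))
    image = rescale(image, scale=1 / 255)
    image = normalize(image, mean=IMAGENET_STANDARD_MEAN, std=IMAGENET_STANDARD_STD)
    image = to_channel_dimension_format(image, ChannelDimension.FIRST)
    print(image.shape)  # (3, 224, 224)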
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # item i belongs to an optimal subset exactly when skipping it changes
    # the optimal value, i.e. dp[i][j] != dp[i - 1][j]
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
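    # Typical invocation (the script filename and paths are illustrative):
    #   python convert_flava_original_checkpoint_to_hf.py \
    #       --checkpoint_path ./flava.pt --codebook_path ./dalle_codebook.pt \
    #       --pytorch_dump_folder_path ./flava-hf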
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
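    # Cross-check on the empty string as well; its SHA-1 digest is the
    # well-known da39a3ee5e6b4b0d3255bfef95601890afd80709.
    assert SHA1Hash(b"").final_hash() == hashlib.sha1(b"").hexdigest()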
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize"""))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size"""))
def lowercase__ ( self):
'''simple docstring'''
pass
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image)
# Test not batched input
lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase__ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray)
# Test not batched input
lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor)
# Test not batched input
lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
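# Driving the same processor outside the test harness; a minimal sketch where
# the size values are illustrative.
def _demo_image_processor():
    processor = ViTImageProcessor(size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)
    out = processor(image, return_tensors="pt")
    print(out["pixel_values"].shape)  # torch.Size([1, 3, 18, 18])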
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
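    # For this example graph the two queries work out to:
    #   graph.show_min(1, 4) == 11  (1 -> 3 -> 4: 5 + 6)
    #   graph.show_min(0, 3) == 16  (0 -> 2 -> 3: 9 + 7)
    print(graph.show_min(1, 4), graph.show_min(0, 3))  # 11 16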
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late three consecutive days,
    # no prize string is possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
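    # From the Project Euler 191 statement: over a four-day period there are
    # exactly 43 prize strings, a handy sanity check for the recurrence above.
    assert solution(4) == 43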
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
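# A short sketch of composing two ControlNets through the wrapper above; the
# model ids are the commonly used public ones and are assumptions here.
def _demo_multi_controlnet():
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    return MultiControlNetModel([canny, pose])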
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit( self):
        '''simple docstring'''
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""")
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8])
        self.assertListEqual([bs, arga] , [8, """hello"""])
    def test_start_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_approach_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_verbose_guard( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arg1 , arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_28 , """hello""" , """world""")
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0])
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0])
    def test_any_other_error( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("""Oops, we had an error!""")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0])
    @require_cuda
    def test_release_memory( self):
        '''simple docstring'''
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory)
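# Illustrative sketch (editor's addition, not from the original test file): typical
# use of `find_executable_batch_size` outside of tests. The decorated function is
# retried with a halved batch size whenever it raises an out-of-memory style error;
# the threshold of 16 below is a made-up stand-in for real GPU capacity.
def _demo_find_executable_batch_size():
    from accelerate.utils.memory import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        if batch_size > 16:  # pretend anything above 16 exhausts GPU memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    return train()  # retried 64 -> 32 -> 16, returns 16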
| 12 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
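# Illustrative sketch (editor's addition, not from the original module): the `pad`
# method grows each spatial dimension to the next multiple of `size` (8 by default),
# which Swin2SR's windowed attention requires. The same arithmetic in isolation:
def _demo_pad_to_multiple(old_height: int = 21, old_width: int = 30, size: int = 8):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    # 21 -> 24 (pad 3), 30 -> 32 (pad 2); exact multiples still gain one full block
    return old_height + pad_height, old_width + pad_width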
| 81 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    '''simple docstring'''
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""
    return config
def rename_key(name, config):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            # the fused qkv projection is split into separate query/key/value
            # entries; the target key templates below follow the renames done in
            # `rename_key` (reconstructed by the editor from that mapping)
            if "weight" in key:
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
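# Illustrative sketch (editor's addition, not from the original script): the qkv
# branch above assumes the fused projection is stacked as [query; key; value]
# along dim 0, so it can be cut into three equal slices. Standalone demonstration
# with a made-up embedding dimension:
def _demo_split_fused_qkv(dim: int = 4):
    import torch

    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    return query, key, value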
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F'Unexpected key {key} in state_dict' )
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_26 if """Jpeg""" in checkpoint_url else 2_56
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1E-3 )
    print("""Looks ok!""" )
    url_to_name = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(F'caidas/{model_name}' )
        processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 12 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    '''simple docstring'''
    def __init__( self):
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self):
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self):
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self):
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures(measures, description):
    print(f"""{description}:""" )
    print(f"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(f"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 82 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule ):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup( self):
        '''simple docstring'''
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype)
    def __call__( self , *args , **kwargs):
        '''simple docstring'''
        outputs = super().__call__(*args , **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering ):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    '''simple docstring'''
    def cross_entropy(logits, labels, reduction=None ):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes )[None]).astype("""f4""" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0_095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self):
        '''simple docstring'''
        os.makedirs(self.base_dir , exist_ok=True)
        self.save_dir = os.path.join(self.base_dir , self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs
    def __call__( self , batch):
        '''simple docstring'''
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard , batch)
        return batch
    def collate_fn( self , features):
        '''simple docstring'''
        input_ids , attention_mask = self.fetch_inputs(features["""input_ids"""])
        batch = {
            """input_ids""": jnp.array(input_ids , dtype=jnp.int32),
            """attention_mask""": jnp.array(attention_mask , dtype=jnp.int32),
            """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.int32),
            """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.int32),
            """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.int32),
        }
        return batch
    def fetch_inputs( self , input_ids):
        '''simple docstring'''
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs( self , input_ids):
        '''simple docstring'''
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None ):
    '''simple docstring'''
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
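# Illustrative sketch (editor's addition, not from the original script):
# `get_batched_dataset` only needs `len()` and dict-of-columns slicing, which a
# `datasets.Dataset` provides. The lightweight stand-in below shows the contract.
def _demo_get_batched_dataset():
    class _SliceableColumns(dict):
        def __len__(self):
            return len(self["category"])

        def __getitem__(self, key):
            if isinstance(key, slice):
                return {name: column[key] for name, column in dict.items(self)}
            return dict.__getitem__(self, key)

    fake_dataset = _SliceableColumns({"input_ids": [[1], [2], [3], [4]], "category": [0, 1, 0, 1]})
    return [batch for batch in get_batched_dataset(fake_dataset, batch_size=2)]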
@partial(jax.pmap , axis_name="""batch""" )
def train_step(state, drp_rng, **model_inputs):
    '''simple docstring'''
    def loss_fn(params):
        start_labels = model_inputs.pop("""start_labels""" )
        end_labels = model_inputs.pop("""end_labels""" )
        pooled_labels = model_inputs.pop("""pooled_labels""" )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    grads = jax.lax.pmean(grads , """batch""" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def val_step(state, **model_inputs):
    '''simple docstring'''
    start_labels = model_inputs.pop("""start_labels""" )
    end_labels = model_inputs.pop("""end_labels""" )
    pooled_labels = model_inputs.pop("""pooled_labels""" )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    return metrics
class TrainState(train_state.TrainState ):
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None):
        '''simple docstring'''
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state)
            tx_args = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            tx , lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train( self , state , tr_dataset , val_dataset):
        '''simple docstring'''
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng , jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0 , dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=f'Running EPOCH-{epoch}'):
                batch = self.data_collator(batch)
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch)
                running_loss += jax_utils.unreplicate(metrics["""loss"""])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state , val_dataset)
                    logging_dict = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict , commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=state)
    def evaluate( self , state , dataset):
        '''simple docstring'''
        dataloader = get_batched_dataset(dataset , self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader , total=total , desc="""Evaluating ... """):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state , **batch)
            running_loss += jax_utils.unreplicate(metrics["""loss"""])
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state):
        '''simple docstring'''
        state = jax_utils.unreplicate(state)
        print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
        self.model_save_fn(save_dir , params=state.params)
        with open(os.path.join(save_dir , """opt_state.msgpack""") , """wb""") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args , os.path.join(save_dir , """args.joblib"""))
        joblib.dump(self.data_collator , os.path.join(save_dir , """data_collator.joblib"""))
        with open(os.path.join(save_dir , """training_state.json""") , """w""") as f:
            json.dump({"""step""": state.step.item()} , f)
        print("""DONE""")
def restore_checkpoint(save_dir, state ):
    '''simple docstring'''
    print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
    with open(os.path.join(save_dir , """flax_model.msgpack""" ) , """rb""" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """rb""" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , """args.joblib""" ) )
    data_collator = joblib.load(os.path.join(save_dir , """data_collator.joblib""" ) )
    with open(os.path.join(save_dir , """training_state.json""" ) , """r""" ) as f:
        training_state = json.load(f )
    step = training_state["""step"""]
    print("""DONE""" )
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps ):
    '''simple docstring'''
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay ):
    '''simple docstring'''
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
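# Illustrative sketch (editor's addition, not from the original script): `build_tx`
# pairs AdamW with a linear warmup followed by linear decay, masking bias and
# LayerNorm parameters out of weight decay. The step counts below are made up.
def _demo_build_tx():
    tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000, weight_decay=0.0_095)
    return tx, lr_schedule(50)  # learning rate halfway through warmup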
| 12 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
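# Illustrative sketch (editor's addition, not from the original module): typical use
# of the fast tokenizer defined above; the checkpoint is fetched from the Hub on
# first call.
def _demo_convbert_tokenizer():
    tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    encoded = tokenizer("Hello world", "second segment")
    # token_type_ids are 0 for the first segment and 1 for the second, exactly as
    # produced by `create_token_type_ids_from_sequences`.
    return encoded["input_ids"], encoded["token_type_ids"]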
| 83 |
lowerCamelCase__ : List[str] = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 12 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        'Authorization': F'''token {auth_token}''',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
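# Illustrative sketch (editor's addition, not from the original script): programmatic
# use of `fetch_github_info`. The token is read from the environment; the printed
# keys are a small subset of the full GitHub API response.
def _demo_fetch_github_info():
    token = os.environ.get("USER_TOKEN", "")
    if token:
        user = fetch_github_info(token)
        print(user.get("login"), user.get("id"))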
| 84 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""")
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self):
        '''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward( self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask)
    def test_use_cache_forward_with_attn_mask( self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask)
    @tooslow
    def test_batch_generation( self):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""")
        inputs = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=True , truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True)
        expected_string = [
            """Hello this is a long string of text.\n\nI'm trying to get the text of the""",
            """Hey, I'm a little late to the party. I'm going to""",
        ]
        self.assertListEqual(output_string , expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name)
                batch_size , seq_length = pt_inputs["""input_ids"""].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["""attention_mask"""][batch_idx, :start_index] = 0
                    pt_inputs["""attention_mask"""][batch_idx, start_index:] = 1
                    prepared_inputs_dict["""attention_mask"""][batch_idx, :start_index] = 0
                    prepared_inputs_dict["""attention_mask"""][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config , dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(fx_outputs , pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config , dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
                batch_size , seq_length = pt_inputs["""input_ids"""].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["""attention_mask"""][batch_idx, :start_index] = 0
                    pt_inputs["""attention_mask"""][batch_idx, start_index:] = 1
                    prepared_inputs_dict["""attention_mask"""][batch_idx, :start_index] = 0
                    prepared_inputs_dict["""attention_mask"""][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs) , len(pt_outputs) , """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(fx_outputs , pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs) , len(pt_outputs_loaded) , """Output lengths differ between Flax and PyTorch""")
                for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
    @tooslow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
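# Illustrative sketch (editor's addition, not from the original module): with the
# `_LazyModule` indirection above, submodules are imported only on first attribute
# access; the import below is what triggers loading of `configuration_squeezebert`.
def _demo_lazy_import():
    from transformers.models.squeezebert import SqueezeBertConfig  # noqa: F401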
| 85 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin ):
    # class name reconstructed by the editor from the attribute/processor pattern
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , image_processor , tokenizer):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
    def batch_decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
| 12 | 0 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__a :Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    """simple docstring"""
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead." , FutureWarning , )
        super().__init__(args=args , **kwargs ) | 86 |
def fibonacci(n: int) -> int:
    '''simple docstring'''
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n: int = 10_00 ) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
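# Illustrative sketch (editor's addition, not from the original solution):
# `fibonacci_digits_index` returns the index of the first Fibonacci number with
# `n` digits (Project Euler problem 25). Small sanity checks:
def _demo_fibonacci_digits():
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(2) == 7   # F(7) = 13 is the first 2-digit term
    assert fibonacci_digits_index(3) == 12  # F(12) = 144 is the first 3-digit term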
| 12 | 0 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    '''simple docstring'''
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self) -> T5Config:
        '''simple docstring'''
        return T5Config.from_pretrained('''google/umt5-base''')
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
        '''simple docstring'''
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
A__ = input_ids.clamp(self.pad_token_id + 1)
A__ = decoder_input_ids.clamp(self.pad_token_id + 1)
A__ = self.get_config()
A__ = config.num_attention_heads
A__ = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
return config, input_dict
def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple:
'''simple docstring'''
A__ , A__ = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
A__ = UMTaModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
A__ = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__)
A__ = result.last_hidden_state
A__ = result.past_key_values
A__ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
A__ = UMTaModel(config=UpperCAmelCase__).get_decoder().to(UpperCAmelCase__).eval()
# first forward pass
A__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
A__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__)
self.parent.assertTrue(len(UpperCAmelCase__) == len(UpperCAmelCase__))
self.parent.assertTrue(len(UpperCAmelCase__) == len(UpperCAmelCase__) + 1)
A__ , A__ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # append the new token to next input_ids
A__ = torch.cat([input_ids, next_tokens] , dim=-1)
A__ = model(UpperCAmelCase__)['''last_hidden_state''']
A__ = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__)['''last_hidden_state''']
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1]).item()
A__ = output_from_no_past[:, -1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , ) ->Union[str, Any]:
'''simple docstring'''
A__ = UMTaModel(config=UpperCAmelCase__).to(UpperCAmelCase__).half().eval()
A__ = model(**UpperCAmelCase__)['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__).any().item())
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def SCREAMING_SNAKE_CASE ( self : int) ->Dict:
'''simple docstring'''
A__ = UMTaModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''')
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
A__ = UMTaModel(config_and_inputs[0]).to(UpperCAmelCase__)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
A__ = self.model_tester.prepare_config_and_inputs()
A__ = config_and_inputs[0]
A__ = UMTaForConditionalGeneration(UpperCAmelCase__).eval()
model.to(UpperCAmelCase__)
A__ = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__),
}
for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items()):
A__ = {name: mask}
            # Explicitly pass decoder_head_mask, as the T5 model requires it when head_mask is specified
if name == "head_mask":
A__ = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__)
A__ = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
A__ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''')
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__).to(UpperCAmelCase__)
A__ = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__)
A__ = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
A__ = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__).input_ids
# fmt: off
A__ = torch.tensor(
[
            [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
])
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__)
A__ = model.generate(input_ids.to(UpperCAmelCase__))
A__ = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
A__ = tokenizer.batch_decode(UpperCAmelCase__)
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
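# Editor's sketch of the end-to-end usage the integration test above covers,
# written against the public transformers API (the library class is
# UMT5ForConditionalGeneration; the aliases in this file are mangled). Requires
# network access to fetch "google/umt5-small".
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tok = AutoTokenizer.from_pretrained("google/umt5-small")
mdl = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
batch = tok("A <extra_id_0> walks into a bar.", return_tensors="pt")
gen = mdl.generate(**batch, max_new_tokens=20)
print(tok.batch_decode(gen, skip_special_tokens=True))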
| 87 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase__ : Any = """Create a default config file for Accelerate with only a few flags set."""
def UpperCamelCase ( lowercase_="no" , lowercase_ = default_json_config_file , lowercase_ = False ) -> Any:
'''simple docstring'''
lowercase__ : Any = Path(lowercase_ )
path.parent.mkdir(parents=lowercase_ , exist_ok=lowercase_ )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
lowercase__ : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
lowercase__ : Dict = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
lowercase__ : Any = torch.cuda.device_count()
lowercase__ : Any = num_gpus
lowercase__ : Optional[int] = False
if num_gpus > 1:
lowercase__ : Tuple = """MULTI_GPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_xpu_available() and use_xpu:
lowercase__ : Union[str, Any] = torch.xpu.device_count()
lowercase__ : str = num_xpus
lowercase__ : List[Any] = False
if num_xpus > 1:
lowercase__ : str = """MULTI_XPU"""
else:
lowercase__ : Optional[Any] = """NO"""
elif is_npu_available():
lowercase__ : Tuple = torch.npu.device_count()
lowercase__ : Union[str, Any] = num_npus
lowercase__ : Union[str, Any] = False
if num_npus > 1:
lowercase__ : List[Any] = """MULTI_NPU"""
else:
lowercase__ : int = """NO"""
else:
lowercase__ : Union[str, Any] = 0
lowercase__ : str = True
lowercase__ : Union[str, Any] = 1
lowercase__ : int = """NO"""
lowercase__ : Tuple = ClusterConfig(**lowercase_ )
config.to_json_file(lowercase_ )
return path
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[str] = parser.add_parser("""default""" , parents=lowercase_ , help=lowercase_ , formatter_class=lowercase_ )
parser.add_argument(
"""--config_file""" , default=lowercase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowercase_ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=lowercase_ )
return parser
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
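# Editor's sketch: the helper above can also be driven programmatically, which is
# equivalent to running `accelerate config default --mixed_precision bf16` on the
# command line. `write_basic_config` is re-exported as
# `accelerate.utils.write_basic_config` in recent accelerate releases (an
# assumption worth verifying against your installed version).
from accelerate.utils import write_basic_config

write_basic_config(mixed_precision="bf16")  # writes the default config under HF_HOME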
| 12 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 'convbert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = embedding_size
lowercase__ : List[str] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Dict = num_groups
lowercase__ : int = classifier_dropout
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
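# Editor's sketch: inspecting the dynamic axes the ONNX config above declares,
# using the real (non-mangled) transformers class names. "multiple-choice" is
# the task string that triggers the extra `choice` axis in the branch above.
from transformers import ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig

onnx_config = ConvBertOnnxConfig(ConvBertConfig(), task="multiple-choice")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])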
| 12 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE : int = "hf-internal-testing/tiny-random-bert"
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
SCREAMING_SNAKE_CASE : List[Any] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = cached_file(lowerCamelCase, lowerCamelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCamelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCamelCase, lowerCamelCase)))
with open(os.path.join(lowerCamelCase, 'refs', 'main')) as f:
_lowercase : Optional[int] = f.read()
self.assertEqual(lowerCamelCase, os.path.join(lowerCamelCase, 'snapshots', lowerCamelCase, lowerCamelCase))
self.assertTrue(os.path.isfile(lowerCamelCase))
# File is cached at the same place the second time.
_lowercase : Any = cached_file(lowerCamelCase, lowerCamelCase)
self.assertEqual(lowerCamelCase, lowerCamelCase)
# Using a specific revision to test the full commit hash.
_lowercase : Dict = cached_file(lowerCamelCase, lowerCamelCase, revision='9b8c223')
self.assertEqual(lowerCamelCase, os.path.join(lowerCamelCase, 'snapshots', lowerCamelCase, lowerCamelCase))
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase, 'is not a valid model identifier'):
_lowercase : Tuple = cached_file('tiny-random-bert', lowerCamelCase)
with self.assertRaisesRegex(lowerCamelCase, 'is not a valid git identifier'):
_lowercase : Optional[Any] = cached_file(lowerCamelCase, lowerCamelCase, revision='aaaa')
with self.assertRaisesRegex(lowerCamelCase, 'does not appear to have a file named'):
_lowercase : List[str] = cached_file(lowerCamelCase, 'conf')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase, 'does not appear to have a file named'):
_lowercase : str = cached_file(lowerCamelCase, 'conf')
with open(os.path.join(lowerCamelCase, 'refs', 'main')) as f:
_lowercase : List[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, '.no_exist', lowerCamelCase, 'conf')))
_lowercase : List[Any] = cached_file(lowerCamelCase, 'conf', _raise_exceptions_for_missing_entries=lowerCamelCase)
self.assertIsNone(lowerCamelCase)
_lowercase : Optional[Any] = cached_file(lowerCamelCase, 'conf', local_files_only=lowerCamelCase, _raise_exceptions_for_missing_entries=lowerCamelCase)
self.assertIsNone(lowerCamelCase)
_lowercase : Dict = mock.Mock()
_lowercase : Any = 5_00
_lowercase : Optional[int] = {}
_lowercase : Optional[int] = HTTPError
_lowercase : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request', return_value=lowerCamelCase) as mock_head:
_lowercase : Dict = cached_file(lowerCamelCase, 'conf', _raise_exceptions_for_connection_errors=lowerCamelCase)
self.assertIsNone(lowerCamelCase)
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only', lowerCamelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', lowerCamelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', lowerCamelCase))
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased', 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCamelCase, 'is not a valid model identifier'):
get_file_from_repo('bert-base-case', lowerCamelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCamelCase, 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased', lowerCamelCase, revision='ahaha')
_lowercase : int = get_file_from_repo('bert-base-cased', lowerCamelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowercase : Tuple = json.loads(open(lowerCamelCase, 'r').read())
self.assertEqual(config['hidden_size'], 7_68)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase : Tuple = Path(lowerCamelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(lowerCamelCase, 'a.txt'), str(lowerCamelCase))
self.assertIsNone(get_file_from_repo(lowerCamelCase, 'b.txt'))
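# Editor's sketch: the core behaviour the tests above exercise. `cached_file`
# resolves a filename inside a Hub repo to a local snapshot path in the HF
# cache, downloading it on first use (network access assumed).
from transformers.utils import cached_file

path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(path)  # .../models--hf-internal-testing--tiny-random-bert/snapshots/<sha>/config.json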
| 89 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase__ : Any = datasets.utils.logging.get_logger(__name__)
class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
__lowerCAmelCase : bool = None
__lowerCAmelCase : bool = None
class _snake_case ( folder_based_builder.FolderBasedBuilder ):
__lowerCAmelCase : Optional[Any] = datasets.Audio()
__lowerCAmelCase : Union[str, Any] = 'audio'
__lowerCAmelCase : str = AudioFolderConfig
__lowerCAmelCase : List[str] # definition at the bottom of the script
__lowerCAmelCase : Optional[int] = AudioClassification(audio_column='audio' , label_column='label' )
lowerCamelCase__ : int = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
lowerCamelCase__ : int = AUDIO_EXTENSIONS
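# Editor's sketch: how this builder is normally consumed. "audiofolder" is the
# packaged loader name datasets registers for this module; a one-subfolder-per-class
# directory layout (the standard classification convention) is assumed.
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="path/to/audio_folder")  # illustrative path
print(ds["train"][0]["audio"], ds["train"][0]["label"])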
| 12 | 0 |
'''simple docstring'''
def _snake_case ( A ) -> Any:
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
lowerCAmelCase__ = len(A ) if (len(A ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(A ) , '''Postfix'''.center(A ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(A ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(A ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(A ) == 0:
stack.append(A ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(A ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:  # "(" has no priority entry
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(A ) # push x to stack
print(
x.center(8 ) , (''''''.join(A )).ljust(A ) , (''''''.join(A )).ljust(A ) , sep=''' | ''' , ) # Output in tabular format
while len(A ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(A )).ljust(A ) , (''''''.join(A )).ljust(A ) , sep=''' | ''' , ) # Output in tabular format
return "".join(A ) # return Postfix as str
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(A ) ):
if infix[i] == "(":
lowerCAmelCase__ = ''')''' # change "(" to ")"
elif infix[i] == ")":
lowerCAmelCase__ = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(A ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
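# Editor's note: a quick worked example for the converters above (it relies on
# the "(" guard added to the scanning loop, and uses the intended names
# infix_2_postfix / infix_2_prefix that the internal calls reference). Both
# functions also print their step-by-step table as a side effect.
assert infix_2_postfix("a+b*(c^d-e)") == "abcd^e-*+"
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"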
if __name__ == "__main__":
__UpperCAmelCase = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
__UpperCAmelCase = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''') | 90 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
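# Editor's sketch of the reverse-diffusion loop the tests above exercise,
# written against the public diffusers API. The zero-returning denoiser stands
# in for a trained UNet's noise prediction; shapes are illustrative.
import torch
from diffusers import DDPMScheduler


def dummy_denoiser(sample, t):
    return torch.zeros_like(sample)  # placeholder epsilon prediction


scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = dummy_denoiser(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample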
| 12 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments | 91 |
def UpperCamelCase ( lowercase_ ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
lowercase__ : int = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase_ )
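# Editor's note: quick sanity check for the helper above — for [0, 0, 10, 10]
# the mean is 5 and every element deviates from it by exactly 5.
assert UpperCamelCase([0, 0, 10, 10]) == 5.0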
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'beit'
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any=8192 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : str=3072 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : Optional[Any]=16 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : List[Any]=[3, 5, 7, 11] , UpperCAmelCase__ : List[str]=[1, 2, 3, 6] , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=0.4 , UpperCAmelCase__ : List[Any]=256 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=255 , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : List[str] =vocab_size
lowercase : str =hidden_size
lowercase : Any =num_hidden_layers
lowercase : List[str] =num_attention_heads
lowercase : List[Any] =intermediate_size
lowercase : Optional[int] =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Any =attention_probs_dropout_prob
lowercase : List[str] =initializer_range
lowercase : int =layer_norm_eps
lowercase : str =image_size
lowercase : Union[str, Any] =patch_size
lowercase : Tuple =num_channels
lowercase : List[Any] =use_mask_token
lowercase : Optional[int] =use_absolute_position_embeddings
lowercase : Any =use_relative_position_bias
lowercase : int =use_shared_relative_position_bias
lowercase : str =layer_scale_init_value
lowercase : int =drop_path_rate
lowercase : Optional[int] =use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase : Dict =out_indices
lowercase : int =pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase : Optional[Any] =use_auxiliary_head
lowercase : Optional[int] =auxiliary_loss_weight
lowercase : Optional[int] =auxiliary_channels
lowercase : Any =auxiliary_num_convs
lowercase : Tuple =auxiliary_concat_input
lowercase : List[Any] =semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return 1E-4
| 92 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = do_rescale
lowercase__ : List[Any] = rescale_factor
lowercase__ : Tuple = do_pad
lowercase__ : Optional[Any] = pad_size
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = (old_height // size + 1) * size - old_height
lowercase__ : str = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[Any] = pad_size if pad_size is not None else self.pad_size
lowercase__ : str = make_list_of_images(SCREAMING_SNAKE_CASE_)
if not valid_images(SCREAMING_SNAKE_CASE_):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
# All transformations expect numpy arrays.
lowercase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE_) for image in images]
if do_rescale:
lowercase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_) for image in images]
if do_pad:
lowercase__ : List[str] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for image in images]
lowercase__ : Dict = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_)
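# Editor's note: a quick numeric check of the padding rule above,
# pad = (old // size + 1) * size - old. It always rounds up to the *next*
# multiple, so an already-aligned dimension still grows by a full block of
# `size` pixels.
for old in (249, 250, 256):
    print(old, (old // 8 + 1) * 8 - old)  # -> 7, 6, 8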
| 12 | 0 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_SCREAMING_SNAKE_CASE )[0]
@deprecated(_SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' )
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream:
lowerCAmelCase__ :List[Any] = _readaa(_SCREAMING_SNAKE_CASE )
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
lowerCAmelCase__ :List[Any] = _readaa(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = _readaa(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = _readaa(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = bytestream.read(rows * cols * num_images )
lowerCAmelCase__ :List[str] = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
lowerCAmelCase__ :int = data.reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1 )
return data
@deprecated(_SCREAMING_SNAKE_CASE , 'Please use tf.one_hot on tensors.' )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Dict = labels_dense.shape[0]
lowerCAmelCase__ :List[str] = numpy.arange(_SCREAMING_SNAKE_CASE ) * num_classes
lowerCAmelCase__ :Optional[Any] = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase__ :Union[str, Any] = 1
return labels_one_hot
@deprecated(_SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=10 ) ->Tuple:
"""simple docstring"""
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=_SCREAMING_SNAKE_CASE ) as bytestream:
lowerCAmelCase__ :Union[str, Any] = _readaa(_SCREAMING_SNAKE_CASE )
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
lowerCAmelCase__ :Union[str, Any] = _readaa(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = bytestream.read(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = numpy.frombuffer(_SCREAMING_SNAKE_CASE , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return labels
class _lowerCAmelCase :
"""simple docstring"""
@deprecated(
__UpperCAmelCase , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = random_seed.get_seed(__UpperCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase__ :List[Any] = dtypes.as_dtype(__UpperCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
lowerCAmelCase__ :Any = 1_0_0_0_0
lowerCAmelCase__ :Union[str, Any] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"images.shape: {images.shape} labels.shape: {labels.shape}"
lowerCAmelCase__ :Dict = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase__ :Dict = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase__ :List[Any] = images.astype(numpy.floataa )
lowerCAmelCase__ :Optional[int] = numpy.multiply(__UpperCAmelCase , 1.0 / 2_55.0 )
lowerCAmelCase__ :Union[str, Any] = images
lowerCAmelCase__ :Any = labels
lowerCAmelCase__ :Any = 0
lowerCAmelCase__ :Union[str, Any] = 0
@property
def snake_case ( self ):
'''simple docstring'''
return self._images
@property
def snake_case ( self ):
'''simple docstring'''
return self._labels
@property
def snake_case ( self ):
'''simple docstring'''
return self._num_examples
@property
def snake_case ( self ):
'''simple docstring'''
return self._epochs_completed
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ):
'''simple docstring'''
if fake_data:
lowerCAmelCase__ :List[str] = [1] * 7_8_4
lowerCAmelCase__ :Any = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__UpperCAmelCase )],
[fake_label for _ in range(__UpperCAmelCase )],
)
lowerCAmelCase__ :List[str] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase__ :Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCAmelCase )
lowerCAmelCase__ :str = self.images[perma]
lowerCAmelCase__ :Optional[int] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase__ :int = self._num_examples - start
lowerCAmelCase__ :Any = self._images[start : self._num_examples]
lowerCAmelCase__ :Dict = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase__ :Any = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = self.images[perm]
lowerCAmelCase__ :Optional[int] = self.labels[perm]
# Start next epoch
lowerCAmelCase__ :Any = 0
lowerCAmelCase__ :Dict = batch_size - rest_num_examples
lowerCAmelCase__ :Optional[Any] = self._index_in_epoch
lowerCAmelCase__ :Tuple = self._images[start:end]
lowerCAmelCase__ :int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase__ :Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_SCREAMING_SNAKE_CASE , 'Please write your own downloading logic.' )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
if not gfile.Exists(_SCREAMING_SNAKE_CASE ):
gfile.MakeDirs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not gfile.Exists(_SCREAMING_SNAKE_CASE ):
urllib.request.urlretrieve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # noqa: S310
with gfile.GFile(_SCREAMING_SNAKE_CASE ) as f:
lowerCAmelCase__ :Optional[Any] = f.size()
print('Successfully downloaded' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'bytes.' )
return filepath
@deprecated(
_SCREAMING_SNAKE_CASE , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=dtypes.floataa , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5000 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=DEFAULT_SOURCE_URL , ) ->Tuple:
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = fake()
lowerCAmelCase__ :Optional[int] = fake()
lowerCAmelCase__ :List[str] = fake()
return _Datasets(train=_SCREAMING_SNAKE_CASE , validation=_SCREAMING_SNAKE_CASE , test=_SCREAMING_SNAKE_CASE )
if not source_url: # empty string check
lowerCAmelCase__ :List[str] = DEFAULT_SOURCE_URL
lowerCAmelCase__ :List[str] = 'train-images-idx3-ubyte.gz'
lowerCAmelCase__ :Dict = 'train-labels-idx1-ubyte.gz'
lowerCAmelCase__ :Dict = 't10k-images-idx3-ubyte.gz'
lowerCAmelCase__ :Tuple = 't10k-labels-idx1-ubyte.gz'
lowerCAmelCase__ :Dict = _maybe_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + train_images_file )
with gfile.Open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowerCAmelCase__ :List[str] = _extract_images(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Any = _maybe_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + train_labels_file )
with gfile.Open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowerCAmelCase__ :Tuple = _extract_labels(_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = _maybe_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + test_images_file )
with gfile.Open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowerCAmelCase__ :int = _extract_images(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = _maybe_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , source_url + test_labels_file )
with gfile.Open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
lowerCAmelCase__ :Dict = _extract_labels(_SCREAMING_SNAKE_CASE , one_hot=_SCREAMING_SNAKE_CASE )
if not 0 <= validation_size <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = (
'Validation size should be between 0 and '
F"{len(_SCREAMING_SNAKE_CASE )}. Received: {validation_size}."
)
raise ValueError(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = train_images[:validation_size]
lowerCAmelCase__ :Any = train_labels[:validation_size]
lowerCAmelCase__ :Union[str, Any] = train_images[validation_size:]
lowerCAmelCase__ :Union[str, Any] = train_labels[validation_size:]
lowerCAmelCase__ :Any = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
lowerCAmelCase__ :Optional[int] = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = _DataSet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return _Datasets(train=_SCREAMING_SNAKE_CASE , validation=_SCREAMING_SNAKE_CASE , test=_SCREAMING_SNAKE_CASE )
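# Editor's sketch: legacy usage of the loader above, mirroring the old
# tensorflow.examples MNIST tutorial API (names follow the de-mangled
# originals — the final function in this file is conventionally read_data_sets):
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#   images, labels = mnist.train.next_batch(100)
#   print(images.shape, labels.shape)  # (100, 784) (100, 10)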
| 93 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCamelCase__ : Optional[int] = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def UpperCamelCase ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
lowercase__ : List[Any] = True
while ask_again:
lowercase__ : Tuple = input(lowercase_ )
try:
if default is not None and len(lowercase_ ) == 0:
return default
return convert_value(lowercase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_=[] , lowercase_=None , lowercase_=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = BulletMenu(lowercase_ , lowercase_ )
lowercase__ : Any = menu.run(default_choice=lowercase_ )
return convert_value(lowercase_ ) if convert_value is not None else result
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Union[str, Any] = int(lowercase_ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[str] = int(lowercase_ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase ( lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : str = int(lowercase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowercase__ : List[Any] = int(lowercase_ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _snake_case ( argparse.RawDescriptionHelpFormatter ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = usage.replace("""<command> [<args>] """ , """""")
return usage
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
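
# Sketch of the effect (not part of the module): importing the package is cheap, and
# each backend branch of `_import_structure` is resolved only on first attribute access:
#
#     from transformers.models.encoder_decoder import EncoderDecoderModel  # torch branch only
#     # TFEncoderDecoderModel / FlaxEncoderDecoderModel stay unimported until touched.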
| 94 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
lowerCamelCase_ = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class UpperCamelCase_ (__A ):
__magic_name__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__magic_name__ = '''nezha'''
def __init__( self : str , lowerCAmelCase_ : str=21_128 , lowerCAmelCase_ : Dict=768 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=3_072 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : List[str]=512 , lowerCAmelCase_ : Union[str, Any]=64 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : List[str]=True , **lowerCAmelCase_ : str , ) -> Union[str, Any]:
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : List[Any] = max_relative_position
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Dict = classifier_dropout
UpperCAmelCase_ : Union[str, Any] = use_cache
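
# Usage sketch (defaults are the ones in the signature above):
#
#     config = NezhaConfig(hidden_size=512, num_hidden_layers=8)
#     assert config.max_relative_position == 64  # Nezha's relative-position window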
| 95 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
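
# Standalone usage sketch of the callback above (outside the test harness; `model`,
# `args` and `train_ds` come from wherever you set up training):
#
#     cb = MyTestTrainerCallback()
#     trainer = Trainer(model, args, train_dataset=train_ds, callbacks=[cb])
#     trainer.train()
#     print(cb.events)  # ["on_init_end", "on_train_begin", "on_epoch_begin", ...]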
| 12 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 96 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
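
# Usage sketch (the checkpoint name is illustrative): RoCBert yields three parallel id
# sequences per input — token ids, glyph-shape ids and pronunciation ids:
#
#     tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
#     tokens = tokenizer.tokenize("你好")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
#     pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)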
| 12 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
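
# Usage sketch: the attribute_map above lets the generic config names resolve to
# GPTSAN's own fields.
#
#     config = GPTSanJapaneseConfig(num_switch_layers=2, num_ext_layers=1)
#     assert config.num_hidden_layers == 3        # maps to num_layers = 2 + 1
#     assert config.hidden_size == config.d_model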
| 97 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
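
# Usage sketch (checkpoint name is illustrative; scores elided):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]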
| 12 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff 2**p - 1 is a Mersenne prime; p itself must be at least 2."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
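    # 2**7 - 1 = 127 is prime while 2**11 - 1 = 2047 = 23 * 89, so this prints True then False.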
| 98 |
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized, top-down) 0/1 knapsack using the global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the filled dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices (1-based)."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding each item that was taken to `optimal_set`."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 12 | 0 |
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` matches of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
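
# Example invocation (paths are illustrative):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./dall_e_encoder.pkl \
#         --pytorch_dump_folder_path ./flava-image-codebook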
| 99 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
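
# Example invocation (paths are illustrative); the codebook is converted on the fly
# via convert_dalle_checkpoint(..., save_checkpoint=False) before the weights are merged:
#
#     python convert_flava_original_pytorch_to_hf.py \
#         --checkpoint_path ./flava.pt --codebook_path ./dall_e_encoder.pkl \
#         --pytorch_dump_folder_path ./flava-full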
| 12 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Build the word embedding weights if they don't exist yet,
                # then retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
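
# Small module-level helper (apparently mirroring `_long_tensor` in the PyTorch OPT
# tests): wraps a nested list of token ids in a constant integer tensor.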
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
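
# The tests below are @slow integration tests: they download real checkpoints from
# the Hub. The hard-coded tensors serve as regression targets; per the comment in
# test_logits further down, they are meant to match the original Metaseq implementation.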
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        # the XLA-compiled model should reproduce the eager output within a looser tolerance
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        # the XLA-compiled model should produce the same logits as the eager model
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
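
# Generation tests: `generate` is called with max_length=10 and default (greedy)
# decoding, so each expected string is a short deterministic continuation of its prompt.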
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
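
    # Decoder-only models need left padding for batched generation: new tokens are
    # appended after the last position, so right padding would leave pad tokens between
    # the prompt and its continuation. Hence `tokenizer.padding_side = "left"` below.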
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
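
    # facebook/opt-350m applies layer norm after attention (do_layer_norm_before=False),
    # unlike opt-125m, so the pre- and post-layer-norm variants get separate generation tests.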
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
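
# The tester above centralizes the processor hyperparameters; the test class below
# rebuilds the processor from prepare_image_processor_dict() so that every input type
# is checked against the same configuration.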
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
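
    # The three test_call_* methods below feed the same random inputs as PIL images,
    # NumPy arrays, and PyTorch tensors, and assert an identical output contract: a
    # batched `pixel_values` tensor of shape (batch, channels, height, width), e.g.
    #
    #     processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
    #     pixel_values = processor(images, return_tensors="pt").pixel_values  # (N, 3, 18, 18)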
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )