| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 or 1) |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from __future__ import annotations

def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """
    Returns the longest (non-strictly) increasing subsequence of ``array``.

    >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    [10, 22, 33, 41, 60, 80]
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of the recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq

if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
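
# A minimal usage sketch for the composite config above (not part of the
# original module; assumes a transformers version that ships the BLIP-2
# classes). `from_vision_qformer_text_configs` just forwards the sub-configs'
# dict forms to `Blip2Config.__init__`.
#
# from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
# config = Blip2Config.from_vision_qformer_text_configs(
#     vision_config=Blip2VisionConfig(),
#     qformer_config=Blip2QFormerConfig(),
#     text_config=OPTConfig(),
#     num_query_tokens=32,
# )
# print(config.qformer_config.encoder_hidden_size)  # mirrors the vision hidden_size, 1408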
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a (self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
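
# A minimal usage sketch (not part of the original module; assumes access to
# the hub checkpoint named in the map above). The tokenizer is character
# level, so tokenize() simply splits the string into characters:
#
# from transformers import MgpstrTokenizer
#
# tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
# print(tokenizer.tokenize("hello"))  # ['h', 'e', 'l', 'l', 'o']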
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def a (*a__ : List[str] , **a__ : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
        # No text is detected in this image, so LayoutLMv2 should fail;
        # we expect an empty answer.
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
        # We can optionally pass the words and bounding boxes directly
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
from __future__ import annotations

def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune the branch if the current path already overshoots the target, or if
    # even adding everything still available could not reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
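
# For the sample input above, the only subsets of [3, 34, 4, 12, 5, 2] that
# sum to 9 are [3, 4, 2] and [4, 5], so the script prints: [3, 4, 2] [4, 5]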
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(_snake_case ,int(b / 2 ) ) * actual_power(_snake_case ,int(b / 2 ) )
else:
return a * actual_power(_snake_case ,int(b / 2 ) ) * actual_power(_snake_case ,int(b / 2 ) )
def lowercase_ ( _snake_case ,_snake_case ):
if b < 0:
return 1 / actual_power(_snake_case ,_snake_case )
return actual_power(_snake_case ,_snake_case )
if __name__ == "__main__":
print(power(-2, -3))
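
# Note: because each call recomputes actual_power(a, int(b / 2)) twice instead
# of caching it in a local variable, the recursion performs O(|b|)
# multiplications; storing the half-power once would bring it down to the
# usual O(log |b|) of exponentiation by squaring. The demo above evaluates
# power(-2, -3) = 1 / (-2) ** 3 = -0.125.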
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
from __future__ import annotations
from math import ceil, floor, sqrt

def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest
    to the target.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area

if __name__ == "__main__":
print(f"""{solution() = }""")
def solution() -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet
    (a < b < c) for which a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
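
# The unique triplet is (a, b, c) = (200, 375, 425): 200**2 + 375**2 = 40000 +
# 140625 = 180625 = 425**2, so the printed product is 200 * 375 * 425 = 31875000.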
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
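
# A minimal instantiation sketch (not part of the original module; the
# hyperparameter values are illustrative, not a recommended configuration).
# The attribute_map above lets generic names resolve to DistilBERT's own:
# config.hidden_size reads config.dim, config.num_hidden_layers reads n_layers.
#
# from transformers import DistilBertConfig, DistilBertModel
#
# config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=4 * 512)
# model = DistilBertModel(config)
# print(config.hidden_size, config.num_hidden_layers)  # 512 4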
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
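# a minimal usage sketch (commented out so the module stays import-safe; with
# use_aggregator=False the metric returns one rouge_score Score tuple per
# (prediction, reference) pair instead of a bootstrap aggregate):
#
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(
#       predictions=["hello there", "general kenobi"],
#       references=["hello there", "general kenobi"],
#       use_aggregator=False,
#   )
#   print(results["rouge1"])  # [Score(precision=1.0, recall=1.0, fmeasure=1.0), ...]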
| 28 |
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a 32 bit integer using Brian Kernighan's way.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so the loop runs
        # once per set bit instead of once per bit (up to 32 times)
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
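    # quick cross-check (sketch) against Python's built-in bit counting
    assert get_set_bits_count(0b101101) == bin(0b101101).count("1") == 4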
| 24 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for the attributes and features related to loading a model
    with `bitsandbytes` 8-bit or 4-bit quantization.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
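# a minimal usage sketch (commented out so the module stays import-safe; 4-bit
# loading additionally requires bitsandbytes>=0.39.0 at construction time):
#
#   config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16"
#   )
#   print(config.quantization_method())  # "nf4"
#   print(config.to_json_string())       # only the fields that differ from the defaults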
| 29 |
from math import log2


def lowest_set_bit_position(number: int) -> int:
    """
    Return the zero-indexed position of the lowest set bit of `number`.

    >>> lowest_set_bit_position(0)
    0
    >>> lowest_set_bit_position(2)
    1
    >>> lowest_set_bit_position(12)
    2
    """
    if isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if number == 0 else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
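    # equivalent formulation (sketch): for positive n, the position of the
    # lowest set bit is also (n & -n).bit_length() - 1
    assert lowest_set_bit_position(12) == (12 & -12).bit_length() - 1 == 2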
| 24 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
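    # typical invocation (sketch; the script name and output path are placeholders):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations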
| 30 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
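# a minimal usage sketch (commented out so the module stays import-safe;
# "Intel/dpt-large" is one depth-estimation checkpoint on the Hugging Face Hub):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")        # normalized depth map as a PIL image
#   print(result["predicted_depth"].shape)   # raw depth tensor from the model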
| 24 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
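    # typical invocation (sketch; the script and checkpoint names are placeholders):
    #   python convert_wavlm_original_checkpoint.py \
    #       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base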
| 31 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they aren't in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
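def test_patch_submodule_with_callable_mock():
    # additional illustration (a sketch, not part of the original suite): the
    # mock can be any object, including a callable standing in for os.path.join
    mock = lambda *parts: "/".join(parts)
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join("a", "b") == "a/b"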
| 24 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
) -> torch.nn.Module:
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.'
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.'
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
    return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.'
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == 'balanced_low_0'),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n '
                    )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit'
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.'
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace('weight', 'SCB'),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
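# a minimal usage sketch (commented out; requires a CUDA GPU and `bitsandbytes`,
# and `MyModel` / the checkpoint folder below are hypothetical placeholders):
#
#   from accelerate import init_empty_weights
#   with init_empty_weights():
#       empty_model = MyModel()
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="./checkpoint", device_map="auto"
#   )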
| 32 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'})
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}
    )
    task: Optional[str] = field(
        default='summarization',
        metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'})
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'},
    )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(f"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json"""))


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf='.ckpt' in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path='train',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path='val',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path='test',
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or '',
        )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('train', metrics, training_args.output_dir)
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='val')
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics['val_loss'], 4)

        if trainer.is_world_process_zero():
            handle_metrics('val', metrics, training_args.output_dir)
            all_metrics.update(metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='test')
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics['test_loss'], 4)
            handle_metrics('test', metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, 'test_generations.txt'))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, 'all_results.json'))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
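    # typical invocation (sketch; this script is conventionally named
    # finetune_trainer.py, and the data/output paths are placeholders):
    #   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
    #       --output_dir ./out --do_train --do_eval --predict_with_generate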
| 24 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self) -> None:
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self) -> None:
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
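# to run these slow integration checks (sketch; the test file path is a placeholder):
#   RUN_SLOW=1 python -m pytest path/to/test_modeling_xlm_roberta.py -k xlm_roberta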
| 33 |
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """
    Calculate the length of a circular arc from its central angle (in degrees)
    and its radius.

    >>> arc_length(90, 10)
    15.707963267948966
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
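    # sanity check (sketch): a quarter circle of radius 10 has length 5 * pi
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12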
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing carrier concentration in a semiconductor from the
    mass-action law n * p = n_i**2. Exactly one argument must be 0.
    """
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
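    # worked example (sketch): with n = 25 and p = 100, the intrinsic
    # concentration comes out as sqrt(25 * 100) = 50.0
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))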
| 34 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
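# a minimal usage sketch (commented out so the module stays import-safe):
#
#   config = ViTMSNConfig(image_size=384)
#   print(config.hidden_size, config.image_size)  # 768 384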
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
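

# Illustrative usage sketch (added; not part of the original test suite).
# It shows what `prepare_mbart_inputs_dict` produces for a toy batch; the
# shapes, token ids and `MBartConfig` values below are hypothetical.
def _demo_prepare_mbart_inputs():
    from transformers import MBartConfig

    config = MBartConfig(vocab_size=99, pad_token_id=1)
    input_ids = tf.constant([[5, 6, 7, 2], [8, 9, 2, 1]])
    decoder_input_ids = tf.constant([[2, 5, 6, 1], [2, 8, 9, 1]])
    inputs = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
    # pad positions (token id 1) are masked out with 0 in the attention mask
    print(inputs["attention_mask"].numpy())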
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
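

# Minimal usage sketch (added): the same EN->RO translation the integration
# test above exercises, outside the unittest harness. Running it downloads
# the full facebook/mbart-large-en-ro checkpoint.
def _demo_mbart_en_ro_translation():
    tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
    batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
    generated_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))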
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
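

# Worked example (added for clarity): for batch dims (2, 3, 4), flat index 17
# decomposes as 17 = 1 * (3 * 4) + 1 * 4 + 1, so:
# >>> _flat_idx_to_idx(17, (2, 3, 4))
# (1, 1, 1)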
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
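

# Usage sketch (added; not in the original module): apply a toy layer over
# the flattened batch dimensions in chunks of 4. `chunk_layer` calls the
# layer with keyword arguments, so the toy layer takes `x` by name.
def _demo_chunk_layer():
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2

    inputs = {"x": torch.ones(2, 8, 3)}  # batch dims are (2, 8)
    out = chunk_layer(toy_layer, inputs, chunk_size=4, no_batch_dims=2)
    assert out.shape == (2, 8, 3)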
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we cannot guarantee the cached value is still valid
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
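

# Hypothetical usage sketch: probe for the largest chunk size (powers of two
# up to max_chunk_size) whose trial run does not raise a RuntimeError, e.g.
# a CUDA out-of-memory error.
def _demo_chunk_size_tuner():
    tuner = ChunkSizeTuner(max_chunk_size=512)

    def representative_fn(x, chunk_size):
        return chunk_layer(lambda x: x + 1, {"x": x}, chunk_size=chunk_size, no_batch_dims=1)

    args = (torch.ones(64, 8),)
    print(tuner.tune_chunk_size(representative_fn, args, min_chunk_size=16))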
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}_{}".format("dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int32,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
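

# Hypothetical usage sketch: "hans_data/" stands in for a directory that
# contains heuristics_train_set.txt / heuristics_evaluation_set.txt from the
# HANS release; any BERT-style checkpoint works for the tokenizer.
def _demo_hans_dataset():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset("hans_data/", tokenizer, task="hans", max_seq_length=128, evaluate=True)
    print(len(dataset), dataset.get_labels())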
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
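
# Usage note (added): the script needs a GITHUB_TOKEN environment variable
# with permission to edit issues on the target repository, e.g.
#
#   GITHUB_TOKEN=<token> python stale.py
#
# (the file name is illustrative; in CI this typically runs as a scheduled job)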
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
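

# Minimal usage sketch (added): assumes an AltCLIP-style checkpoint such as
# "BAAI/AltCLIP"; any repo that pairs a CLIPImageProcessor with an XLM-R
# tokenizer behaves the same way.
def _demo_altclip_processor():
    from PIL import Image

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']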
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        short_words = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(short_words)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
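

# Round-trip sketch (hypothetical subclass, added): DEFAULTS lists the
# tunable parameters; shortname() omits values equal to the defaults and
# abbreviates the rest, and parse_repr() inverts the encoding.
class _DemoShortNamer(TrialShortNamer):
    PREFIX = "demo"
    DEFAULTS = {"learning_rate": 3e-5, "num_epochs": 3}


def _demo_short_namer():
    name = _DemoShortNamer.shortname({"learning_rate": 1e-4, "num_epochs": 3})
    print(name)  # "demo_lr0.0001" (exact abbreviation depends on collisions)
    print(_DemoShortNamer.parse_repr(name))  # {"learning_rate": 0.0001, "num_epochs": 3}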
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
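

# Instantiation sketch (added): the values are arbitrary. When
# `embedding_size` is left unset it falls back to `hidden_size`.
def _demo_roformer_config():
    config = RoFormerConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    print(config.embedding_size)  # 64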
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenizer(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
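

# Quick sketch (added): the helper just draws uniform random token ids for
# dummy benchmark inputs, e.g.
# >>> random_input_ids(batch_size=2, sequence_length=4, vocab_size=10).shape
# TensorShape([2, 4])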
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
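# A hedged usage sketch of the API exercised above, kept as a comment because it
# downloads timm weights on first use:
#
#   from transformers import AutoBackbone
#
#   backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
#   outputs = backbone(pixel_values)     # pixel_values: (batch, 3, H, W) float tensor
#   feature_maps = outputs.feature_maps  # one feature map per requested stage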
| 24 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
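# Why the label padding above is replaced with -100: PyTorch's cross-entropy ignores
# targets equal to ignore_index (-100 by default), so padded positions contribute no
# loss. A self-contained illustration:
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    logits = torch.randn(1, 3, 5)          # (batch, seq_len, vocab)
    labels = torch.tensor([[2, -100, 4]])  # middle token is padding -> ignored
    loss = F.cross_entropy(logits.transpose(1, 2), labels, ignore_index=-100)
    print(loss)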
| 41 |
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
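# A quick demonstration of the behaviour the cases above pin down: imports inside
# try/except blocks are treated as optional dependencies and skipped by get_imports.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "mod.py")
        with open(path, "w") as f:
            f.write(TOP_LEVEL_TRY_IMPORT)
        print(get_imports(path))  # ['os'] -- 'bar' is guarded, so it is not required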
| 24 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
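# After conversion, the dump folder loads like any hub checkpoint; a hedged sketch (the
# path below is a placeholder, not part of the original script):
#
#   from transformers import SEWModel, Wav2Vec2FeatureExtractor
#
#   model = SEWModel.from_pretrained("/path/to/pytorch_dump_folder")
#   feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/path/to/pytorch_dump_folder")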
| 42 |
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
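# A minimal counterpart server sketch (an illustrative assumption -- the matching server
# script is not part of this file). It accepts one client on the same port and streams a
# file back in 1024-byte chunks:
def serve(filename: str = "file_to_send.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()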
| 24 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
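# The _LazyModule above defers the heavy torch/vision imports until an attribute is first
# accessed; a minimal sketch of the same idea with plain importlib (illustrative only,
# not the transformers implementation):
#
#   import importlib
#
#   class LazyNamespace:
#       def __init__(self, module_name: str) -> None:
#           self._module_name = module_name
#
#       def __getattr__(self, item: str):
#           module = importlib.import_module(self._module_name)
#           return getattr(module, item)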
| 43 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
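# For comparison, a minimal iterative sketch of the patience-sorting technique for the
# *length* of the longest increasing subsequence (an alternative approach, not part of
# the recursive solution above):
from bisect import bisect_left


def lis_length(array: list[int]) -> int:
    tails: list[int] = []  # tails[k] holds the smallest tail of an increasing run of length k + 1
    for value in array:
        pos = bisect_left(tails, value)
        if pos == len(tails):
            tails.append(value)
        else:
            tails[pos] = value
    return len(tails)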
| 24 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
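# A quick instantiation sketch showing the defaults above (config objects build without
# weights, so this is cheap to run):
if __name__ == "__main__":
    config = ViTMSNConfig(image_size=224, patch_size=16)
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # vit_msn 768 12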
| 44 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
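# LiLT consumes one (x0, y0, x1, y1) box per token, normalized to a 0-1000 page grid; a
# hedged sketch of hand-built inputs (the coordinates below are made-up illustrative
# values, and `model` stands for any LiltModel instance):
#
#   input_ids = torch.tensor([[101, 2023]])
#   bbox = torch.tensor([[[10, 20, 110, 40], [120, 20, 250, 40]]])
#   outputs = model(input_ids=input_ids, bbox=bbox)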
| 24 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
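# A usage sketch (hedged: instantiating the tool downloads the NLLB checkpoint, and the
# call below assumes the PipelineTool calling convention of one positional text input
# plus keyword language names):
#
#   translator = TranslationTool()
#   print(translator("Bonjour tout le monde.", src_lang="French", tgt_lang="English"))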
| 45 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
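# Outside the test harness, the same pipeline is a one-liner; a hedged sketch (requires
# pytesseract plus a model download, hence kept as a comment):
#
#   from transformers import pipeline
#
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))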
| 24 | 0 |
"""simple docstring"""
from math import isclose, sqrt
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase = point_y / 4 / point_x
lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase = outgoing_gradient**2 + 4
lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
lowerCAmelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase = x_minus if isclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else x_plus
lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float = 1.4 , SCREAMING_SNAKE_CASE : float = -9.6 ):
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = first_x_coord
lowerCAmelCase = first_y_coord
lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = next_point(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'{solution() = }')
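# Sanity-check sketch for the geometry above: s2 and c2 are the double-angle identities
# sin(2t) = 2m / (1 + m^2) and cos(2t) = (1 - m^2) / (1 + m^2) for a normal of gradient m,
# which is how one reflection is composed from two rotations about the normal.
if __name__ == "__main__":
    first_gradient = (10.1 - -9.6) / (0.0 - 1.4)
    print(next_point(1.4, -9.6, first_gradient))  # coordinates of the second impact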
| 46 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
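# The two pruning tests in create_state_space_tree are what keep the search tractable: a
# branch dies as soon as its path overshoots max_sum, or once even taking every remaining
# number cannot reach it. Another quick check:
if __name__ == "__main__":
    print(generate_sum_of_subsets_soln([1, 2, 3, 4], 5))  # [[1, 4], [2, 3]]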
| 24 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowerCamelCase : Optional[Any] = generate_large_matrix()
lowerCamelCase : Optional[int] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _lowerCAmelCase ( _UpperCamelCase : list[list[int]] ) -> None:
"""simple docstring"""
assert all(row == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for row in grid )
assert all(list(_UpperCamelCase ) == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for col in zip(*_UpperCamelCase ) )
def _lowerCAmelCase ( _UpperCamelCase : list[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =len(_UpperCamelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_SCREAMING_SNAKE_CASE =(left + right) // 2
_SCREAMING_SNAKE_CASE =array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_SCREAMING_SNAKE_CASE =mid + 1
else:
_SCREAMING_SNAKE_CASE =mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_UpperCamelCase )
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """simple docstring"""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """simple docstring"""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit
    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_00)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 47 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.0_2, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    @unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    @unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    @unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''' ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self)
| 24 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
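# Hedged usage sketch, not part of the original file (checkpoint name assumed):
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")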
| 48 |
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
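# Hedged note, not part of the original file: the only Pythagorean triplet with
# a + b + c = 1000 is (200, 375, 425), so solution() returns 31875000.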
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        '''simple docstring'''
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def create_and_check_model(self, config, input_values, labels):
        '''simple docstring'''
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''AST does not use inputs_embeds''')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        '''simple docstring'''
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        '''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 49 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''monolingual_vocab_file'''])
        with open(self.monolingual_vocab_file, '''w''', encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 24 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 50 |
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError('''Input must be a non-negative integer''')
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
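# Hedged example, not part of the original file: 25 is 0b11001, which has three
# set bits.
assert get_set_bits_count(25) == 3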
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """simple docstring"""
    return num1 ^ num2 < 0
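# Hedged example, not part of the original file: XOR of two ints is negative
# exactly when their sign bits differ.
assert different_signs(1, -1) is True
assert different_signs(1, 1) is False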
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
from math import log2
def get_index_of_rightmost_set_bit(number: int) -> int:
    # Reconstructed from the broken original; the float check is an assumption.
    if number < 0:
        raise ValueError('''Input value must be a positive integer''')
    elif isinstance(number, float):
        raise TypeError('''Input value must be a \'int\' type''')
    return 0 if (number == 0) else int(log2(number & -number))
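# Hedged example, not part of the original file: 18 is 0b10010, whose lowest set
# bit sits at index 1, so log2(18 & -18) == 1.
assert get_index_of_rightmost_set_bit(18) == 1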
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
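# Hedged example, not part of the original file: the longest palindromic
# substring of "abbbaba" is "abbba".
assert palindromic_string("abbbaba") == "abbba"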
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        return {}, {}, {}
    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='''bicubic''', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('''uint8''')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
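# Hedged usage sketch, not part of the original file (checkpoint name assumed):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is a tensor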
| 24 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
def __call__( self : str , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ):
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
__UpperCamelCase = BatchFeature()
if text is not None:
__UpperCamelCase = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
encoding.update(__A )
__UpperCamelCase = self.qformer_tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
__UpperCamelCase = qformer_text_encoding.pop('input_ids' )
__UpperCamelCase = qformer_text_encoding.pop('attention_mask' )
if images is not None:
__UpperCamelCase = self.image_processor(__A , return_tensors=__A )
encoding.update(__A )
return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory, exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory, **kwargs )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 53 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock ):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None ) is None
    with patch_submodule(_test_patching, '''len''', mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock ):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock ):
        pass
| 24 | 0 |
"""simple docstring"""
def count_inversions_bf(arr):
    '''simple docstring'''
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
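# Hedged example, not part of the original file: [3, 1, 2] has the two
# out-of-order pairs (3, 1) and (3, 2).
assert count_inversions_bf([3, 1, 2]) == 2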
def count_inversions_recursive(arr):
    '''simple docstring'''
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    '''simple docstring'''
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    '''simple docstring'''
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 54 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1_024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json""" ) )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''', training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args, p, None ):
            assert hasattr(config, p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf='''.ckpt''' in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task )
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path='''train''', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path='''val''', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path='''test''', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''], 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''], 4 )
            handle_metrics('''test''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds )
                write_txt_file(test_preds, os.path.join(training_args.output_dir, '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, '''all_results.json''' ) )
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 24 | 0 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
return 2 * pi * radius * (angle / 360)
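# Hedged example, not part of the original file: a 90 degree arc of a circle of
# radius 10 has length 5 * pi.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12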
if __name__ == "__main__":
print(arc_length(90, 10))
| 24 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator() -> Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 200_0000 ) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x: x < n, prime_generator() ) )
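# Hedged note, not part of the original file: the sum of all primes below two
# million (Project Euler 10) is 142913828922, so solution() returns that value.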
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.0_2, layer_norm_eps=1E-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 24 | 0 |
"""simple docstring"""
class OverFlowError(Exception):
    '''simple docstring'''
    pass
class UnderFlowError(Exception):
    '''simple docstring'''
    pass
class FixedPriorityQueue:
    '''simple docstring'''
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("Maximum queue size is 100" )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2" )
    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("All queues are empty" )
def __str__( self ):
return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    '''simple docstring'''
    def __init__(self):
        self.queue = []
    def enqueue(self, data):
        if len(self.queue ) == 1_00:
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(data )
    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty" )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ):
        return str(self.queue )
def fixed_priority_queue() -> None:
    """Demonstrate FixedPriorityQueue, draining it by priority level."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    # Only nine items were enqueued, so this tenth dequeue raises UnderFlowError.
    print(fpq.dequeue())
def element_priority_queue() -> None:
    """Demonstrate ElementPriorityQueue: smallest values come out first."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    # Only nine items were enqueued, so this tenth dequeue raises UnderFlowError.
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
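    # Note (added): FixedPriorityQueue drains level 0 before levels 1 and 2,
    # while ElementPriorityQueue always returns the smallest stored value first.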
| 57 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 24 | 0 |
"""Project 3D coordinates onto a 2D plane and rotate points about an axis."""
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(
    x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane using a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    """Rotate a 3D point about the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }""")
| 58 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int64,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Convert HANS examples to features consumable by a model."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 24 | 0 |
def pancake_sort(arr: list) -> list:
    """Sort ``arr`` into ascending order by repeatedly flipping prefixes."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix so the maximum moves to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
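    # Added sanity check (illustration): pancake sort orders values ascending.
    assert pancake_sort([3, 1, 4, 1, 5]) == [1, 1, 3, 4, 5]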
| 59 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-R tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
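

# Minimal usage sketch (added; the checkpoint id and image are placeholders):
# processor = AltCLIPProcessor.from_pretrained("some/altclip-checkpoint")
# batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")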
| 24 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : int = 1000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
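    # Added numeric check (illustrates the summation only): for n = 3 the single
    # term is 2 * 3 * ((3 - 1) // 2) = 6.
    assert solution(3) == 6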
| 60 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingLevelTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 24 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=2 , lowercase_=24 , lowercase_=16 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=None , lowercase_=2 , lowercase_=2 , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Tuple = max_length
UpperCAmelCase_ : Optional[Any] = num_mel_bins
UpperCAmelCase_ : List[str] = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Any = scope
UpperCAmelCase_ : str = frequency_stride
UpperCAmelCase_ : Dict = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ : Any = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase_ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase_ : Optional[int] = frequency_out_dimension * time_out_dimension
UpperCAmelCase_ : Union[str, Any] = num_patches + 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = ASTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : str = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = ASTModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowercase_ )
UpperCAmelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : List[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = ASTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_audio():
    audio_file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(audio_file)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class A_ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.default_feature_extractor
UpperCAmelCase_ : str = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowercase_ )
UpperCAmelCase_ : List[Any] = self.default_feature_extractor
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = prepare_audio()
UpperCAmelCase_ : int = audio.squeeze().numpy()
UpperCAmelCase_ : Any = feature_extractor(lowercase_ , sampling_rate=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
UpperCAmelCase_ : List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase_ : List[Any] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 61 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 24 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query GitHub for self-hosted runner status and report any offline ones."""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
return values.split(',' )
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_A = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 62 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
def a (self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : int ):
"""simple docstring"""
__snake_case = '''resnet18'''
__snake_case = '''microsoft/resnet-18'''
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ )
__snake_case = AutoBackbone.from_pretrained(a__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] )
__snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
| 24 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 63 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 24 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
_snake_case : List[Any] = []
def generate(snake_case__ : int , snake_case__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_snake_case , _snake_case : Optional[Any] = arr[k - 1], arr[i]
else: # k is odd
_snake_case , _snake_case : List[str] = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
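    # Added sanity check (illustration): Heap's algorithm yields all n! orderings.
    assert len(heaps([1, 2, 3])) == 6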
| 64 |
import socket
def main() -> None:
    """Receive a file from a server on port 12312 and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
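    # Note (added): this client is only the receiving half; it assumes a matching
    # server is listening on port 12312 of this host and streams the file in
    # 1024-byte chunks.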
| 24 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 65 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
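    # Added example (hand-traced illustration): for [1, 3, 2, 4] the function
    # keeps the non-decreasing run [1, 2, 4].
    assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]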
| 24 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : List[Any] = """gptj"""
_A : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: int , snake_case: int=50_400 , snake_case: Optional[Any]=2_048 , snake_case: Any=4_096 , snake_case: Dict=28 , snake_case: Union[str, Any]=16 , snake_case: Optional[int]=64 , snake_case: List[Any]=None , snake_case: List[str]="gelu_new" , snake_case: Dict=0.0 , snake_case: Union[str, Any]=0.0 , snake_case: List[Any]=0.0 , snake_case: List[Any]=1E-5 , snake_case: Any=0.0_2 , snake_case: Union[str, Any]=True , snake_case: int=50_256 , snake_case: int=50_256 , snake_case: List[Any]=False , **snake_case: List[str] , ) -> Optional[Any]:
snake_case_ :Optional[Any] = vocab_size
snake_case_ :List[Any] = n_positions
snake_case_ :List[str] = n_embd
snake_case_ :List[str] = n_layer
snake_case_ :int = n_head
snake_case_ :int = n_inner
snake_case_ :List[str] = rotary_dim
snake_case_ :Optional[Any] = activation_function
snake_case_ :int = resid_pdrop
snake_case_ :List[str] = embd_pdrop
snake_case_ :str = attn_pdrop
snake_case_ :Union[str, Any] = layer_norm_epsilon
snake_case_ :Optional[Any] = initializer_range
snake_case_ :Any = use_cache
snake_case_ :Tuple = bos_token_id
snake_case_ :Any = eos_token_id
super().__init__(
bos_token_id=snake_case , eos_token_id=snake_case , tie_word_embeddings=snake_case , **snake_case )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
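# Minimal export-prep sketch (an illustrative example, not part of this module;
# assumes PyTorch is installed and uses the checkpoint listed in the archive
# map above):
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy_inputs = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )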
| 66 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that each bbox is legal (x0 <= x1 and y0 <= y1) by swapping
        # coordinate pairs that come out of ids_tensor in the wrong order
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
| 24 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__UpperCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
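# Example invocation (a sketch with hypothetical local paths, assuming this
# file is saved as convert_funnel_original_tf_checkpoint_to_pytorch.py):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin \
#       --base_model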
| 67 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# No text is detected in this image, so layoutlmv2 should fail;
# it most likely returns an empty answer.
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionally pass the words and bounding boxes directly
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
| 24 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # get_activation must return a fresh module each call, so setting an
        # attribute on one instance must not leak to the other
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
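# Quick sketch of the API exercised above (illustrative only, not part of the
# test suite):
#
#   import torch
#   from transformers.activations import get_activation
#   act = get_activation("gelu_10")  # GELU with outputs clipped to [-10, 10]
#   y = act(torch.linspace(-20.0, 20.0, steps=5))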
| 68 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune this branch if the running sum already exceeds max_sum, or if even
    # adding every remaining number could not reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
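# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the DFS above finds the
# subsets [3, 4, 2] and [4, 5] (in original index order), so this prints:
#   [3, 4, 2] [4, 5]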
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 69 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
| 24 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 |
def solution() -> int:
    # Project Euler 9: there is exactly one Pythagorean triplet (a, b, c)
    # with a + b + c == 1000; return the product a * b * c.
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
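# The unique triplet is (a, b, c) = (200, 375, 425), since 200**2 + 375**2 ==
# 425**2 and 200 + 375 + 425 == 1000, so solution() evaluates to 31875000.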
| 24 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
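# Why the two checks agree: by Fermat's little theorem, b ** (p - 1) % p == 1
# for a prime p that does not divide b, so b ** (p - 2) % p is the modular
# inverse of b, and division by b modulo p becomes multiplication by it.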
| 71 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 24 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
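# Examples: strabool("yes") -> True, strabool("0") -> False, and a value that
# is already a bool is returned unchanged; any other string raises
# argparse.ArgumentTypeError.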
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
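# Example invocation (a sketch; the checkpoint file name is hypothetical, but
# note that the "cd"/"ct" and "imagenet64"/"256" substrings in it select the
# UNet and scheduler configs above):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True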
| 72 |
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Clearing the lowest set bit each time (number &= number - 1) means
        # the loop runs once per set bit instead of once per bit position, so
        # it never iterates more times than there are 1-bits in the input.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
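# Example: 13 is 0b1101, so get_set_bits_count(13) == 3. This is Kernighan's
# bit-counting trick: each number &= number - 1 clears exactly one set bit.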
| 24 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
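# Worked example for the helper above (illustrative numbers): with a 480x640
# input, a 384x384 target, keep_aspect_ratio=True and multiple=32, the height
# scale (384/480 = 0.8) is closer to 1 than the width scale (384/640 = 0.6),
# so both sides are scaled by 0.8 and the result is (384, 512) -- each
# dimension already a multiple of 32.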
class A_ ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['height'], size['width']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , keep_aspect_ratio : bool = None , ensure_multiple_of : int = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 73 |
from math import log2


def lowerCamelCase__ ( a : int ) -> int:
    if not isinstance(a , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
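# Example: for 36 = 0b100100, `a & -a` isolates the lowest set bit (4) and
# log2(4) = 2, so the function returns bit index 2; by the explicit special
# case above it returns 0 for an input of 0.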
| 24 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}


class lowerCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if (len(act_info ) > 1 and act_info[0] != "gated") or len(act_info ) > 2:
            raise ValueError(
                F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS )
class SCREAMING_SNAKE_CASE__ ( Pipeline ):
def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
    def _sanitize_parameters(self : Dict , **a__ : Any ):
        """simple docstring"""
        return {}, {}, {}

    def preprocess(self : List[str] , a__ : Any ):
        """simple docstring"""
        image = load_image(a__ )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward(self : int , model_inputs : List[Any] ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self : int , model_outputs : str ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
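# Usage sketch (the checkpoint name is only an example):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("path/to/image.jpg")
# result["depth"] is a PIL image; result["predicted_depth"] is the raw tensor.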
| 24 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module : List[str] ) -> bool:
    """simple docstring"""
    if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(torch , '''_dynamo''' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model : Tuple , keep_fp32_wrapper : bool = True ) -> List[str]:
    """simple docstring"""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , '''forward''' )
        original_forward = model.__dict__.pop('''_original_forward''' , None )
        if original_forward is not None:
            while hasattr(forward , '''__wrapped__''' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '''_converted_to_transformer_engine''' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    """simple docstring"""
    PartialState().wait_for_everyone()
def save( obj : Optional[int] , f : Optional[Any] ) -> None:
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs : List[str] ) -> Union[str, Any]:
    """simple docstring"""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj : Tuple ) -> List[Any]:
    """simple docstring"""
    if not hasattr(obj , '''__qualname__''' ) and not hasattr(obj , '''__name__''' ):
        obj = getattr(obj , '''__class__''' , obj )
    if hasattr(obj , '''__qualname__''' ):
        return obj.__qualname__
    if hasattr(obj , '''__name__''' ):
        return obj.__name__
    return str(obj )
def merge_dicts( source : Dict , destination : Any ) -> Tuple:
    """simple docstring"""
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
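# Illustrative behavior of the recursive merge above: nested keys from
# `source` are merged into `destination` in place, e.g.
# merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}}) -> {'a': {'y': 2, 'x': 1}}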
def is_port_in_use( port : int = None ) -> bool:
    """simple docstring"""
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
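# Example (hypothetical caller): bail out before spawning processes if the
# default rendezvous port (29500, per the default above) is already bound.
# if is_port_in_use():
#     raise ConnectionError('''Port 29500 is already in use.''')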
| 75 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
| 24 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        """simple docstring"""
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []

    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ) -> Union[SchedulerOutput, Tuple]:
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample : torch.FloatTensor , *args : Union[str, Any] , **kwargs : Union[str, Any] ) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def _get_prev_sample( self , sample : str , timestep_index : Tuple , prev_timestep_index : Any , ets : Union[str, Any] ) -> Any:
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__( self ) -> int:
        """simple docstring"""
        return self.config.num_train_timesteps
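# Usage sketch for the scheduler above (the denoising-loop shape follows the
# usual diffusers pattern; `unet` and `sample` are placeholders):
# scheduler = _UpperCamelCase(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     model_output = unet(sample, t).sample
#     sample = scheduler.step(model_output, t, sample).prev_sample
| 76 |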
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder : bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds : bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir : str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task : Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length : Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length : Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length : Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang : Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang : Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams : Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss : bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split : str , metrics : Dict , output_dir : str ) -> None:
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
return all_metrics
def _mp_fn( index : Optional[Any] ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
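# Example invocation (all paths and hyper-parameters are illustrative, and the
# filename assumes this script is saved as finetune_trainer.py):
# python finetune_trainer.py \
#     --model_name_or_path t5-small --data_dir ./cnn_dm --task summarization \
#     --output_dir ./outputs --do_train --do_eval --predict_with_generate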
| 24 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config : BigBirdConfig
    dtype : jnp.dtype = jnp.float32
    add_pooling_layer : bool = True

    def setup( self ) -> None:
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )

    def __call__( self , *args , **kwargs ) -> Union[str, Any]:
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooler_labels ):
    '''simple docstring'''

    def cross_entropy( logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype('f4' )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss

    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooler_labels )
    return (start_loss + end_loss + pooled_loss) / 3
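# Sanity check of the one-hot cross-entropy above (illustrative values):
# cross_entropy(jnp.array([[2.0, 0.0, 0.0]]), jnp.array([0])) reduces to
# -log_softmax([2, 0, 0])[0] ≈ 0.2395 before the jnp.mean reduction is applied.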
@dataclass
class Args:
    model_id : str = "google/bigbird-roberta-base"
    logging_steps : int = 3000
    save_steps : int = 10500

    block_size : int = 128
    num_random_blocks : int = 3

    batch_size_per_device : int = 1
    max_epochs : int = 5

    # tx_args
    lr : float = 3e-5
    init_lr : float = 0.0
    warmup_steps : int = 20000
    weight_decay : float = 0.0095

    save_dir : str = "bigbird-roberta-natural-questions"
    base_dir : str = "training-expt"
    tr_data_path : str = "data/nq-training.jsonl"
    val_data_path : str = "data/nq-validation.jsonl"

    def __post_init__( self ) -> None:
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id : int
    max_length : int = 4096  # no dynamic padding on TPUs

    def __call__( self , batch ) -> List[str]:
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch

    def collate_fn( self , features ) -> Union[str, Any]:
        input_ids , attention_mask = self.fetch_inputs(features['input_ids'] )
        batch = {
            'input_ids': jnp.array(input_ids , dtype=jnp.int32 ),
            'attention_mask': jnp.array(attention_mask , dtype=jnp.int32 ),
            'start_labels': jnp.array(features['start_token'] , dtype=jnp.int32 ),
            'end_labels': jnp.array(features['end_token'] , dtype=jnp.int32 ),
            'pooled_labels': jnp.array(features['category'] , dtype=jnp.int32 ),
        }
        return batch

    def fetch_inputs( self , input_ids ) -> List[str]:
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )

    def _fetch_inputs( self , input_ids ) -> Tuple:
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
    '''simple docstring'''
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    # a trailing partial batch is dropped by the integer division
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name='batch' )
def train_step( state , drp_rng , **model_inputs ):
    '''simple docstring'''

    def loss_fn( params ):
        start_labels = model_inputs.pop('start_labels' )
        end_labels = model_inputs.pop('end_labels' )
        pooled_labels = model_inputs.pop('pooled_labels' )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )

    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    grads = jax.lax.pmean(grads , 'batch' )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng


@partial(jax.pmap , axis_name='batch' )
def val_step( state , **model_inputs ):
    '''simple docstring'''
    start_labels = model_inputs.pop('start_labels' )
    end_labels = model_inputs.pop('end_labels' )
    pooled_labels = model_inputs.pop('pooled_labels' )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({'loss': loss} , axis_name='batch' )
    return metrics
class TrainState(train_state.TrainState):
    loss_fn : Callable = struct.field(pytree_node=False )


@dataclass
class Trainer:
    args : Args
    data_collator : Callable
    train_step_fn : Callable
    val_step_fn : Callable
    model_save_fn : Callable
    logger : wandb
    scheduler_fn : Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ) -> Dict:
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx , lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state

    def train( self , state , tr_dataset , val_dataset ) -> None:
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=f"""Running EPOCH-{epoch}""" ):
                batch = self.data_collator(batch )
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics['loss'] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=state )

    def evaluate( self , state , dataset ) -> Any:
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc='Evaluating ... ' ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics['loss'] )
            i += 1
        return running_loss / i

    def save_checkpoint( self , save_dir , state ) -> None:
        state = jax_utils.unreplicate(state )
        print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=' ... ' )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , 'opt_state.msgpack' ) , 'wb' ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , 'args.joblib' ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , 'data_collator.joblib' ) )
        with open(os.path.join(save_dir , 'training_state.json' ) , 'w' ) as f:
            json.dump({'step': state.step.item()} , f )
        print('DONE' )
def restore_checkpoint( save_dir , state ):
    '''simple docstring'''
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=' ... ' )
    with open(os.path.join(save_dir , 'flax_model.msgpack' ) , 'rb' ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , 'opt_state.msgpack' ) , 'rb' ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , 'args.joblib' ) )
    data_collator = joblib.load(os.path.join(save_dir , 'data_collator.joblib' ) )
    with open(os.path.join(save_dir , 'training_state.json' ) , 'r' ) as f:
        training_state = json.load(f )
    step = training_state['step']
    print('DONE' )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    '''simple docstring'''
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    '''simple docstring'''

    def weight_decay_mask( params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )

    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 77 |
from math import pi
def arc_length( angle : int , radius : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
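# Sanity check of the call above: arc_length(90, 10) is a 90-degree arc of a
# radius-10 circle, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.708.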
| 24 | 0 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label( fname ):
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' , stem ).groups()[0]
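# Illustrative: extract_label('images/beagle_32.jpg') -> 'beagle'
# (the regex strips the trailing index and extension from the basename).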
class PetsDataset(Dataset ):
    """simple docstring"""

    def __init__( self , file_names , image_transform=None , label_to_id=None ) -> None:
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ) -> int:
        return len(self.file_names )

    def __getitem__( self , idx ) -> dict:
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config , args ):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    image_size = config['image_size']
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names]
UpperCAmelCase = list(set(lowercase_ ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(lowercase_ ) )
UpperCAmelCase = int(0.8 * len(lowercase_ ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
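            # e.g. resuming from "step_612" with len(train_dataloader) == 250 gives
            # starting_epoch = 612 // 250 = 2 and resume_step = 612 - 2 * 250 = 112,
            # i.e. training restarts at epoch 2 after skipping its first 112 batches.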
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
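
# Example invocation (the paths and flag values below are illustrative, not defaults):
#   python cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking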
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
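
# Minimal usage sketch (assumes the matching `ViTMSNModel` class from transformers;
# this builds a randomly initialised model from the ViT-base-like defaults above):
#
#     from transformers import ViTMSNModel
#     config = ViTMSNConfig(image_size=224, patch_size=16)
#     model = ViTMSNModel(config)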
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None,
            provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None,
            provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/transform `img` by mapping the triangle `pt1` onto the triangle `pt2`."""
    rotation_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
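
# cv2.getAffineTransform solves for the unique 2x3 matrix M that maps the three
# source points onto the three destination points ([x', y']^T = M @ [x, y, 1]^T);
# cv2.warpAffine then applies that mapping to every pixel of the input image.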
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case_ = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,
            padding="max_length", truncation=True, return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
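
# Typical construction (the data path is illustrative; "hans" is the only registered task):
#     train_dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)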
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_val, max_val):
    return (
        clamp(rect[0], min_val[0], max_val[0]),
        clamp(rect[1], min_val[1], max_val[1]),
        clamp(rect[2], min_val[0], max_val[0]),
        clamp(rect[3], min_val[1], max_val[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
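
# e.g. next_divisible(300, 128) == 300 - (300 % 128) == 256, i.e. the largest
# multiple of 128 that does not exceed 300.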
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50,
        negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
        callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
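
# The output canvas is always 4x the input resolution; it is assembled from a grid of
# ceil(width / tile_size) * ceil(height / tile_size) overlapping tiles, alpha-blended
# together with the linear-ramp transparency masks built by make_transparency_mask above.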
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
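
# Usage sketch ("BAAI/AltCLIP" is the reference checkpoint for this tokenizer/image
# processor pairing; the variable names are illustrative):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")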
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingLevelTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed, a process whose arrival time has passed
    # and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
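
# Usage sketch (the image path is illustrative; requires a vision-enabled install):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))  # -> an English description string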
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features,
            out_indices=self.out_indices, stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
'''Check whether a number is a Krishnamurthy number (equal to the sum of the factorials of its digits).'''


def factorial(digit: int) -> int:
    '''Return digit! computed recursively.'''
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    '''Return True if the sum of the factorials of the digits equals the number itself.'''
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
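# Quick sanity checks (illustrative addition, assuming the functions above):
# 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is a Krishnamurthy number,
# while 100 -> 1! + 0! + 0! = 3 is not.
assert krishnamurthy(145)
assert krishnamurthy(40585)  # 24 + 1 + 120 + 40320 + 120 == 40585
assert not krishnamurthy(100)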
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
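# Illustrative extra case (an assumption, not part of the original test module):
# top-level imports that are not guarded by try/except should all be reported.
def test_multiple_top_level_imports(tmp_path):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write('\nimport os\nimport sys\n')
    assert sorted(get_imports(tmp_file_path)) == ['os', 'sys']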
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
import socket
def lowerCamelCase__ ( ) -> Any:
__snake_case = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__snake_case = socket.gethostname()
__snake_case = 1_2312
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
__snake_case = sock.recv(1024 )
if not data:
break
out_file.write(snake_case_ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('T')


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
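# Minimal usage sketch (illustrative, assuming the class above):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
    print(graph)  # expected: {1: [2], 2: [1, 3], 3: [2]}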
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''Return one longest non-decreasing subsequence of `array` (naive recursive approach).'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
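# Worked example (illustrative): longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
# returns [10, 22, 33, 41, 60, 80]; 9 and 21 are skipped because keeping them
# would shorten the non-decreasing subsequence. Note this branch-and-compare
# recursion is exponential in the worst case.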
if __name__ == "__main__":
import doctest
doctest.testmod()
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__lowerCAmelCase : Tuple = random.Random()
def a__ ( A_, A_=1.0, A_=None, A_=None ):
'''simple docstring'''
if rng is None:
__magic_name__ = global_rng
__magic_name__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : int=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : Union[str, Any]=128 , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : int=30 , UpperCamelCase__ : Optional[Any]=4_4100 , ) -> int:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = min_seq_length
__magic_name__ = max_seq_length
__magic_name__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__magic_name__ = spectrogram_length
__magic_name__ = feature_size
__magic_name__ = num_audio_channels
__magic_name__ = hop_length
__magic_name__ = chunk_length
__magic_name__ = sampling_rate
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowercase ( self : Any , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[Any]=False ) -> List[Any]:
"""simple docstring"""
def _flatten(UpperCamelCase__ : Tuple ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
__magic_name__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__magic_name__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__magic_name__ = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( _A , unittest.TestCase ):
'''simple docstring'''
a__ = TvltFeatureExtractor
def _lowercase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = TvltFeatureExtractionTester(self )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """spectrogram_length""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """feature_size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """num_audio_channels""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """hop_length""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """chunk_length""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """sampling_rate""" ) )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
__magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
__magic_name__ = feat_extract_first.to_dict()
__magic_name__ = feat_extract_second.to_dict()
__magic_name__ = dict_first.pop("""mel_filters""" )
__magic_name__ = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" )
feat_extract_first.to_json_file(UpperCamelCase__ )
__magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
__magic_name__ = feat_extract_first.to_dict()
__magic_name__ = feat_extract_second.to_dict()
__magic_name__ = dict_first.pop("""mel_filters""" )
__magic_name__ = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__magic_name__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__magic_name__ = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
__magic_name__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__magic_name__ = feature_extractor(UpperCamelCase__ , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__magic_name__ = feature_extractor(
UpperCamelCase__ , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__magic_name__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__magic_name__ = np.asarray(UpperCamelCase__ )
__magic_name__ = feature_extractor(UpperCamelCase__ , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowercase ( self : Tuple , UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__magic_name__ = ds.sort("""id""" ).select(range(UpperCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self._load_datasamples(1 )
__magic_name__ = TvltFeatureExtractor()
__magic_name__ = feature_extractor(UpperCamelCase__ , return_tensors="""pt""" ).audio_values
    self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__magic_name__ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=_UpperCamelCase ):
lowerCAmelCase : str = ['note_seq']
def __init__( self : Tuple ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ):
requires_backends(self ,['note_seq'] )
@classmethod
def __lowercase ( cls : List[Any] ,*_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[Any] ):
requires_backends(cls ,['note_seq'] )
@classmethod
def __lowercase ( cls : Union[str, Any] ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Any ):
requires_backends(cls ,['note_seq'] )
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def a (*a__ : List[str] , **a__ : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase__ ( snake_case_ : int ) -> Optional[int]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
  @require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
    # We can optionally pass directly the words and bounding boxes
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
  @require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
  @require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    '''Surface area of a cube.'''
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    '''Surface area of a cuboid.'''
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    '''Surface area of a sphere: 4 * pi * r^2.'''
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    '''Total surface area of a hemisphere (curved surface plus flat disc): 3 * pi * r^2.'''
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    '''Total surface area of a right circular cone.'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    '''Total surface area of a conical frustum.'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError('surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    '''Total surface area of a right circular cylinder.'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    '''Surface area of a ring torus: 4 * pi^2 * R * r.'''
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    '''Area of a rectangle.'''
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    '''Area of a square.'''
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    '''Area of a triangle given its base and height.'''
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    '''Area of a triangle from its three sides, using Heron's formula.'''
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    '''Area of a parallelogram.'''
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    '''Area of a trapezium.'''
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    '''Area of a circle.'''
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    '''Area of an ellipse.'''
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    '''Area of a rhombus from its two diagonals.'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    '''Area of a regular polygon with the given number of sides and side length.'''
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: the partial sum is already too big, or even taking every remaining
    # number could no longer reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
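# Worked example (illustrative): with nums = [3, 34, 4, 12, 5, 2] and max_sum = 9,
# the two solutions printed are [3, 4, 2] and [4, 5]. A branch such as [3, 34] is
# pruned immediately because its partial sum (37) already exceeds 9.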
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''Euclidean distance between two vectors.'''
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    '''For each row of value_array, return the nearest row of dataset and its distance.'''
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')

    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''Cosine similarity between two vectors.'''
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
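# Worked example (illustrative): with dataset [[0, 0], [1, 1], [2, 2]] and
# value_array [[0, 1]], both [0, 0] and [1, 1] lie at distance 1.0; the strict
# `dist > temp_dist` comparison keeps the first, so the answer pairs [0, 0]
# with distance 1.0.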
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''Scrape the headline COVID-19 counters from worldometers.info.'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
def solution() -> int:
    '''Project Euler 9: return a * b * c for the Pythagorean triplet with a + b + c == 1000.'''
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f'{solution() = }')
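# The unique triplet is (a, b, c) = (200, 375, 425): 200 + 375 + 425 == 1000 and
# 200**2 + 375**2 == 40000 + 140625 == 180625 == 425**2, so the product is
# 200 * 375 * 425 == 31875000.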
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 93 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 24 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """simple docstring"""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
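A brief usage sketch for the recursive two-ended search above; the sample list is illustrative only.

example = [1, 3, 5, 7, 9]
print(search(example, 7))  # 3, the index of the key
print(search(example, 4))  # -1, key not present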
| 94 |
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, hence the
        # loop won't run 32 times; it only runs once per `1` bit
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
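For instance, 37 is 0b100101, so the Kernighan loop above strips exactly three set bits; a minimal check:

assert get_set_bits_count(37) == 3
assert get_set_bits_count(0) == 0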
| 24 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'}, )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''')
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
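For context, a typical invocation of this script would look like the sketch below; the script filename, checkpoint name, and data paths are illustrative assumptions, not taken from the source, while the flags mirror the dataclass fields defined above.

python run_ner.py \
    --model_name_or_path bert-base-cased \
    --data_dir ./conll2003 \
    --labels ./conll2003/labels.txt \
    --task_type NER \
    --max_seq_length 128 \
    --output_dir ./ner-out \
    --do_train --do_eval --do_predict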
| 95 |
from math import log2


def lowest_set_bit_index(a: int) -> int:
    if a < 0:
        raise ValueError('Input value must be a positive integer')
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
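Worked example: 12 is 0b1100, so 12 & -12 isolates the lowest set bit (4), and log2(4) gives its index.

assert lowest_set_bit_index(12) == 2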
| 24 | 0 |
"""simple docstring"""
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
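A small sanity check, assuming the factorization logic above (28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors):

assert count_divisors(28) == 6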
if __name__ == "__main__":
    print(solution())
| 96 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        return {}, {}, {}

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
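A minimal usage sketch for this pipeline. The "depth-estimation" task alias and the Intel/dpt-large checkpoint are standard transformers names, but they are assumptions here rather than something stated in the snippet.

from transformers import pipeline

depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
outputs['depth'].save('depth.png')  # PIL image; outputs['predicted_depth'] holds the raw tensor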
| 24 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'

_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """simple docstring"""

    def __init__(self, lock_file):
        '''simple docstring'''
        self.lock_file = lock_file
        return None

    def __str__(self):
        '''simple docstring'''
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""

    def __init__(self, lock):
        '''simple docstring'''
        self.lock = lock
        return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock:
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        '''simple docstring'''
        return self._lock_file

    @property
    def timeout(self):
        '''simple docstring'''
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        '''simple docstring'''
        self._timeout = float(value)
        return None
    def _acquire(self):
        '''simple docstring'''
        raise NotImplementedError()

    def _release(self):
        '''simple docstring'''
        raise NotImplementedError()

    @property
    def is_locked(self):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
self.release()
return None
    def __del__(self):
        '''simple docstring'''
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        '''simple docstring'''
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""

    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        '''simple docstring'''
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('only soft file lock is available')
| 97 |
warnings.warn('''only soft file lock is available''') | 97 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching, 'os.path.join', mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, 'open', mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching, 'pandas.read_csv', mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, 'len', None) is None
    with patch_submodule(_test_patching, 'len', mock):
        assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching, 'open', mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, 'os.path.join', mock_join):
        with patch_submodule(_test_patching, 'os.rename', mock_rename):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, 'os.rename', mock_rename):
        with patch_submodule(_test_patching, 'os.path.join', mock_join):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', mock):
        pass
    with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', mock):
pass
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 98 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'})
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'})
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf='.ckpt' in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path='train', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path='val', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path='test', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***')
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train', metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(metric_key_prefix='val')
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'], 4)
        if trainer.is_world_process_zero():
            handle_metrics('val', metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info('*** Predict ***')
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='test')
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'], 4)
            handle_metrics('test', metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, 'test_generations.txt'))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, 'all_results.json'))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
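For context, a typical invocation would look like the sketch below; the script filename, checkpoint, and paths are illustrative assumptions, while the flags follow the dataclass fields defined above.

python finetune_trainer.py \
    --model_name_or_path t5-small \
    --data_dir ./cnn_dm \
    --task summarization \
    --do_train --do_eval --predict_with_generate \
    --output_dir ./seq2seq-out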
| 24 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """simple docstring"""

    def analyze_directory(self, directory, identifier=None, n_identifier=None, ignore_files=None, only_modules=True):
        '''simple docstring'''
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        '''simple docstring'''
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        '''simple docstring'''
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        '''simple docstring'''
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        '''simple docstring'''
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        '''simple docstring'''
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 99 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
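Quick check of the formula above: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.

from math import isclose
assert isclose(arc_length(90, 10), 5 * pi)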
| 24 | 0 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir, """README.md""")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""], key), getattr(expected_dataset_infos["""default"""], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 100 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = 'vit_msn'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
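A short usage sketch; the asserted values simply mirror the defaults in the signature above.

config = ViTMSNConfig()
assert (config.image_size, config.patch_size, config.hidden_size) == (224, 16, 768)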
| 24 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
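# De-obfuscated sketch of the integration flow exercised above (assumes a CUDA
# device and the public facebook/DiT-XL-2-256 checkpoint; not part of the test file):
if __name__ == "__main__":
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")
    # DiT is class-conditional: map ImageNet label names to class ids first.
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    generator = torch.manual_seed(0)
    images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images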
| 101 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
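# Minimal CPU usage sketch for the pipeline tested above (checkpoint, provider,
# and input image taken from the tests; not part of the test file):
if __name__ == "__main__":
    demo_pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
    )
    demo_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    result = demo_pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=demo_image,
        strength=0.75,
        generator=np.random.RandomState(0),
    ).images[0]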
| 24 | 0 |
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be split into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
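    # Two quick checks of word_break (classic word-segmentation examples,
    # added for illustration):
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False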
| 102 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int64,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
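# Usage sketch (not part of the original module; the data path is hypothetical
# and must contain heuristics_evaluation_set.txt):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    demo_processor = hans_processors["hans"]()
    demo_examples = demo_processor.get_dev_examples("/path/to/hans")
    demo_features = hans_convert_examples_to_features(
        demo_examples, demo_processor.get_labels(), 128, demo_tokenizer
    )
    print(len(demo_features))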
| 24 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 103 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
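# Usage sketch (not from the original file; "BAAI/AltCLIP" is an assumed hub
# checkpoint that ships this CLIP-image-processor + XLM-R tokenizer pair):
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt", padding=True)
    print(list(inputs.keys()))  # token ids, attention mask, and pixel_values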
| 24 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted key down the module tree, e.g. "encoder.layers.0.attention.k_proj".
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
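# The core trick in set_recursively above is walking a dotted key down a module
# tree with getattr. Standalone sketch of the pattern (toy module, no fairseq needed):
if __name__ == "__main__":
    import torch.nn as nn

    demo_model = nn.Module()
    demo_model.encoder = nn.Module()
    demo_model.encoder.proj = nn.Linear(4, 4)

    pointer = demo_model
    for attribute in "encoder.proj.weight".split("."):
        pointer = getattr(pointer, attribute)
    print(pointer.shape)  # torch.Size([4, 4])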
| 104 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
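# The public API exercised above in one short sketch (real transformers calls):
if __name__ == "__main__":
    logging.set_verbosity_error()  # silence everything below ERROR for transformers.*
    quiet_logger = logging.get_logger("transformers")
    quiet_logger.warning("hidden")  # suppressed
    logging.set_verbosity_warning()
    quiet_logger.warning("visible")  # printed
    # Equivalently: export TRANSFORMERS_VERBOSITY=error before importing transformers.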
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
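# The _LazyModule pattern above defers heavy imports until first attribute
# access. A standalone sketch of the same idea using only the standard library
# (illustrative, not transformers' actual implementation):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)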
| 105 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
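    # Worked call (exactly one of the three quantities must be zero, the unknown):
    name, value = electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
    print(name, value)  # mobility 1.5604...e+18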
| 106 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
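# Readable usage sketch of the backbone API exercised above (the resnet18
# checkpoint and out_indices come straight from the equivalence test):
if __name__ == "__main__":
    demo_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    demo_pixel_values = torch.randn(1, 3, 224, 224)
    demo_outputs = demo_backbone(demo_pixel_values)
    print([tuple(f.shape) for f in demo_outputs.feature_maps])
    print(demo_backbone.channels)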
| 24 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
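    # Quick self-check (classic amicable pair): d(220) = 284 and d(284) = 220,
    # so solution(300) returns 504 = 220 + 284.
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
    assert solution(300) == 504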
| 107 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
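# get_imports statically lists a file's module dependencies; imports guarded by
# try/except are skipped. Quick standalone sketch (helper name is illustrative):
def _demo_get_imports(tmp_dir="/tmp"):
    path = os.path.join(tmp_dir, "demo_file.py")
    with open(path, "w") as f:
        f.write("import os\nimport numpy\n\ntry:\n    import optional_dep\nexcept ImportError:\n    pass\n")
    return get_imports(path)  # ['os', 'numpy']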
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 108 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
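# The client above expects a peer that streams a file and then closes the
# socket. Minimal matching server sketch (the filename is an assumption; any
# local file works):
def serve_file(filename: str = "mytext.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(5)

    conn, addr = server.accept()
    print(f"Got connection from {addr}")
    conn.recv(1024)  # consume the client's hello
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # EOF signals the client that the transfer is done
    server.close()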
| 24 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCAmelCase : int = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
UpperCAmelCase : List[str] = add_prefix_space
UpperCAmelCase : Optional[Any] = pre_tok_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = add_prefix_space
UpperCAmelCase : Optional[int] = """post_processor"""
UpperCAmelCase : str = getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase : int = tuple(state["""sep"""] )
if "cls" in state:
UpperCAmelCase : Dict = tuple(state["""cls"""] )
UpperCAmelCase : Optional[Any] = False
if state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCAmelCase : Optional[int] = add_prefix_space
UpperCAmelCase : Union[str, Any] = True
if state.get("""trim_offsets""" , _SCREAMING_SNAKE_CASE ) != trim_offsets:
UpperCAmelCase : int = trim_offsets
UpperCAmelCase : Union[str, Any] = True
if changes_to_apply:
UpperCAmelCase : List[Any] = getattr(_SCREAMING_SNAKE_CASE , state.pop("""type""" ) )
UpperCAmelCase : Optional[int] = component_class(**_SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value
UpperCAmelCase : Any = value
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : List[str] = kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : Tuple = kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : str = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
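# A minimal usage sketch (not part of the original module; it assumes network access to the
# public "roberta-base" checkpoint) showing the special-token layout built above:
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   tokenizer.build_inputs_with_special_tokens([100, 200])         # [0, 100, 200, 2]          -> <s> A </s>
#   tokenizer.build_inputs_with_special_tokens([100, 200], [300])  # [0, 100, 200, 2, 2, 300, 2] -> <s> A </s></s> B </s>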
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
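# Worked example (values assumed from the classic longest-increasing-subsequence problem,
# not taken from the original file): longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
# should return [10, 22, 33, 41, 60, 80], one of the longest increasing subsequences.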
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Pull the embedded profile JSON out of an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Crawl public Instagram profile information for a given username."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github"):
    """Sanity-check the crawler against the public GitHub account."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
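        # LiLT consumes LayoutLM-style boxes (x0, y0, x1, y1) on a 0-1000 grid, so the swap
        # below makes the randomly drawn coordinates well-formed: x0 <= x1 and y0 <= y1.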
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
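# How the lazy pattern above works, as a minimal sketch kept in comments so this
# __init__.py's behaviour is unchanged. This is an illustrative re-implementation for
# exposition only, NOT transformers' actual `_LazyModule`: the module object in
# `sys.modules` is swapped for one whose `__getattr__` imports a submodule the first
# time one of its names is requested, so importing the package stays cheap.
#
#   import importlib
#   import types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # Map every exported name to the submodule that defines it.
#           self._name_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           if attr not in self._name_to_module:
#               raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#           submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
#           value = getattr(submodule, attr)
#           setattr(self, attr, value)  # cache so later lookups bypass __getattr__
#           return value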
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
        256047,  # eng_Latn
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,  # </s>
    ]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
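# Token-layout summary drawn from the two assertions above: with legacy_behaviour=True
# NLLB emits `tokens ... </s> <src_lang_code>`, while the default (non-legacy) layout
# places the language code first: `<src_lang_code> tokens ... </s>`.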
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Explore the state-space tree depth first, pruning a branch as soon as its path sum
    exceeds max_sum or can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
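# Expected output for the demo above (checked by hand, not taken from the original file):
# the subsets of [3, 34, 4, 12, 5, 2] that sum to 9 are [3, 4, 2] and [4, 5].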
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" MBART tokenizer (backed by HuggingFace's `tokenizers` library), based on BPE.

    The tokenization method is `<tokens> <eos> <language code>` for source-language documents,
    and `<language code> <tokens> <eos>` for target-language documents.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
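# A minimal preprocessing sketch (not part of the module; it assumes network access to
# the public "facebook/mbart-large-en-ro" checkpoint):
#
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # Per set_src_lang_special_tokens above, the source ids end with `</s> en_XX`.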
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
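        # The // 32 factor above: ConvNext's patchify stem downsamples by 4 and each of the
        # three later stages halves the resolution again (4 * 2**3 = 32).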
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
    def test_hidden_states_output(self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> List[str]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
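
# Hypothetical manual reproduction of the logits check above (added; requires
# network access, and uses the same names imported in this file):
#
#   model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224')
#   processor = AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224')
#   inputs = processor(images=prepare_img(), return_tensors='pt')
#   model(**inputs).logits[0, :3]  # ~ tensor([-0.0260, -0.4739, 0.1911])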
@require_torch
class ConvNextBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ) -> None:
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        encoder_attn_name = 'SelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
    for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index )}'''
        # Self-Attention
        tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning (Flax parameter paths reconstructed; they follow the standard FlaxT5 layout)
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index )]['layer']
        flax_model_encoder_layer_block['0'][encoder_attn_name]['k']['kernel'] = tax_attention_key
        flax_model_encoder_layer_block['0'][encoder_attn_name]['o']['kernel'] = tax_attention_out
        flax_model_encoder_layer_block['0'][encoder_attn_name]['q']['kernel'] = tax_attention_query
        flax_model_encoder_layer_block['0'][encoder_attn_name]['v']['kernel'] = tax_attention_value
        flax_model_encoder_layer_block['0']['layer_norm']['weight'] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['0'][encoder_attn_name]['global_input_layer_norm']['weight'] = tax_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_0']['kernel'] = tax_mlp_wi_0
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_1']['kernel'] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi']['kernel'] = tax_mlp_wi
        flax_model_encoder_layer_block['1']['DenseReluDense']['wo']['kernel'] = tax_mlp_wo
        flax_model_encoder_layer_block['1']['layer_norm']['weight'] = tax_mlp_layer_norm
        flax_model.params['encoder']['block'][str(layer_index )]['layer'] = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['relative_attention_bias']['embedding'] = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['global_relative_attention_bias']['embedding'] = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
    flax_model.params['encoder']['final_layer_norm']['weight'] = tax_encoder_norm
# Decoder
    for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index )}'''
        # Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        txa_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning (standard FlaxT5 decoder layout: 0 = self-attention, 1 = cross-attention, 2 = MLP)
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index )]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = tax_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = tax_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = tax_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = tax_attention_value
        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = tax_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = tax_mlp_wi
        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = tax_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = txa_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index )]['layer'] = flax_model_decoder_layer_block
    # Decoder Normalization
    txa_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = txa_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['block']['0']['layer']['0']['SelfAttention']['relative_attention_bias']['embedding'] = tax_decoder_rel_embedding
    # Token Embeddings
    txa_token_embeddings = tax_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = txa_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params['lm_head']['kernel'] = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('T5X Model was successfully converted!' )
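

def verify_flax_dump(flax_dump_folder_path):
    # Added sanity-check sketch (not part of the original script): reload the
    # dumped checkpoint and print a couple of parameter shapes. The parameter
    # paths assume the standard FlaxT5 layout used in the conversion above.
    import jax

    model = FlaxAutoModelForSeqaSeqLM.from_pretrained(flax_dump_folder_path)
    shapes = jax.tree_util.tree_map(lambda leaf: leaf.shape, model.params)
    print(shapes['shared']['embedding'])
    print(shapes['encoder']['final_layer_norm']['weight'])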
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
def lowerCamelCase__ ( ) -> int:
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
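

def solution_closed_form(perimeter: int = 1000) -> int:
    # Alternative sketch (added, not in the original): for a Pythagorean triplet
    # with a + b + c == perimeter, b is fixed by a, since a^2 + b^2 = (perimeter - a - b)^2
    # rearranges to b = perimeter * (perimeter - 2a) / (2 * (perimeter - a)).
    # This turns the O(n^2) search above into an O(n) scan.
    for a in range(1, perimeter // 3):
        numerator = perimeter * (perimeter - 2 * a)
        denominator = 2 * (perimeter - a)
        if numerator % denominator == 0:
            b = numerator // denominator
            c = perimeter - a - b
            return a * b * c
    raise ValueError('no Pythagorean triplet sums to the given perimeter')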
if __name__ == "__main__":
print(F'{solution() = }')
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__( self , device : str = "cpu" , clip_model : str = "openai/clip-vit-large-patch14" ) -> None:
        """simple docstring"""
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self , images ) -> Union[str, Any]:
        """simple docstring"""
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self , text=None , images=None , **kwargs ) -> Union[str, Any]:
        """simple docstring"""
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['''pixel_values'''] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP( nn.Module ):
    def __init__( self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ) -> Any:
        """simple docstring"""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ) -> str:
        """simple docstring"""
        images = []
        if output_path is None:
            output_path = '''./animation.gif'''
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '''/*''' ) )
        if not len(paths ):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''' )
        if len(paths ) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('''.png''' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
        print(f"""gif saved to {output_path}""" )
    def _get_latent( self , path=None , img=None ) -> Tuple:
        """simple docstring"""
        if not (path or img):
            raise ValueError('''Input either path or tensor''' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self , transform_vector ) -> Dict:
        """simple docstring"""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self , prompts , image , weights=None ) -> Tuple:
        """simple docstring"""
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='''pt''' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ) -> List[Any]:
        """simple docstring"""
        pos_logits = self._get_clip_similarity(pos_prompts['''prompts'''] , image , weights=(1 / pos_prompts['''weights''']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['''prompts'''] , image , weights=neg_prompts['''weights'''] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ) -> List[Any]:
        """simple docstring"""
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print('''CLIP loss''' , clip_loss )
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self , positive_prompts , negative_prompts , image_path ) -> Any:
        """simple docstring"""
        wandb.init(reinit=True , project='''face-editor''' )
        wandb.config.update({'''Positive Prompts''': positive_prompts} )
        wandb.config.update({'''Negative Prompts''': negative_prompts} )
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log({'''Original Image''': wandb.Image(image )} )
    def process_prompts( self , prompts ) -> List[str]:
        """simple docstring"""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('''|''' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(''':''' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ) -> Any:
        """simple docstring"""
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '''_''' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('''Original Image''' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
            if self.log:
                wandb.log({'''Image''': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
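

# Hypothetical usage sketch (added; config/checkpoint paths are placeholders,
# and the prompt string uses the "text:weight|text:weight" convention parsed by
# process_prompts above):
#
#   editor = VQGAN_CLIP(vqgan_config="vqgan.yaml", vqgan_checkpoint="vqgan.ckpt")
#   editor.generate(
#       pos_prompts="a smiling face:1.0|studio lighting:0.5",
#       neg_prompts="blurry:0.8",
#       image_path="face.png",
#   )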
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
        with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
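

# Added illustration of the id layout the test above relies on: the
# SentencePiece-BPE model reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so the
# first monolingual vocab entry gets id 4 and unknown pieces map to id 3.
_ILLUSTRATIVE_TOKEN_TO_ID = {
    tok: idx for idx, tok in enumerate(['<s>', '<pad>', '</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est'])
}
assert _ILLUSTRATIVE_TOKEN_TO_ID['▁This'] == 4 and _ILLUSTRATIVE_TOKEN_TO_ID['<unk>'] == 3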
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase ):
    """simple docstring"""
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ) -> str:
        """simple docstring"""
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ) -> int:
        """simple docstring"""
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> Union[str, Any]:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> Optional[int]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self ) -> List[Any]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self ) -> Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self ) -> Any:
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        images = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self ) -> str:
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
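

# Hypothetical end-to-end usage of the processor exercised above (added;
# requires downloading the checkpoint, array shapes follow the tests):
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#   sorted(inputs.keys())  # -> audio_mask, audio_values, pixel_mask, pixel_values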
def lowerCamelCase__ ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
raise ValueError('''Input must be a non-negative integer''' )
__snake_case = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
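

def count_set_bits_naive(number: int) -> int:
    # Added reference implementation for cross-checking the Brian Kernighan
    # loop above: Python's bin() representation counts the same set bits,
    # e.g. count_set_bits_naive(37) == 3.
    return bin(number).count('''1''')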
if __name__ == "__main__":
import doctest
doctest.testmod()
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087])
        assert np.abs(image_slice - expected_slice).max() < 1E-1
    def test_pipeline_pndm( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_lms( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_euler( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_euler_ancestral( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
    def test_pipeline_dpm_multistep( self):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider( self):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
    def test_inference_k_lms( self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
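
# Note (added): the provider tuple returned by `gpu_provider` above is standard
# onnxruntime configuration; the same (name, options) pair could be passed
# directly to a raw session, e.g.:
#
#   ort.InferenceSession(
#       "model.onnx",
#       providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
#   )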
from math import loga
def lowerCamelCase__ ( a : int ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (a == 0) else int(loga(a & -a ) )
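

def _lowest_set_bit_examples() -> None:
    # Added illustration: `a & -a` isolates the lowest set bit, so log2 of it
    # gives that bit's index, e.g. a = 12 (0b1100) -> a & -a = 4 -> index 2.
    assert lowerCamelCase__(12 ) == 2
    assert lowerCamelCase__(1 ) == 0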
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param( tax_checkpoint_path : Optional[Any] ) -> List[Any]:
    """simple docstring"""
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    flax_params = flatten_dict(tax_model )
    return flax_params
def rename_and_convert_flax_params( flax_dict : Dict ) -> Optional[int]:
    """simple docstring"""
    converted_dict = {}
    CONVERSION_MAPPING = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
    DECODER_CONVERSION_MAPPING = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '''.'''.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , new_key )
                new_key = new_key.replace('''encoder''' , '''encoder.encoder''' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf( tax_checkpoint_path : Optional[int] , pytorch_dump_folder_path : Tuple , use_large : Union[str, Any]=False , is_vqa : Optional[int]=False ) -> Union[str, Any]:
    """simple docstring"""
    flax_params = get_flax_param(tax_checkpoint_path )
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        # NOTE: the two attribute targets below are reconstructed from context.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
a_ : Dict = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
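
# Added illustration of the regex rewriting step above on one flattened key:
#
#   re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_7.mlp.wi")
#   # -> "encoder.layer.7.mlp.wi"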
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline(Pipeline ):
    def __init__(self , *args : Any , **kwargs : Dict ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__(self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess(self , image ):
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward(self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess(self , model_outputs ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
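
# Hypothetical usage of the pipeline defined above (added; the model name is
# illustrative, any depth-estimation checkpoint would do):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image built from the interpolated prediction
#   result["predicted_depth"]  # raw torch.Tensor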
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
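
# Note (added): with the lazy structure above, nothing under
# `modeling_unispeech` is imported until first attribute access, e.g.
#
#   from transformers.models.unispeech import UniSpeechModel  # triggers the real import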
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
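

def test_patch_submodule_basic_usage() -> None:
    # Added usage sketch of the same context-manager pattern exercised above:
    # the patch is visible only inside the `with` block and fully reverted after.
    mock = '''__basic_usage_mock__'''
    original_join = _test_patching.os.path.join
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
        assert _test_patching.os.path.join is mock
    assert _test_patching.os.path.join is original_join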
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class(config_class: Optional[Any] ):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("""/""" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
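
# Added illustration: the checkpoint regex above matches markdown-style links:
#
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]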
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split : List[Any] , metrics : List[str] , output_dir : Dict ) -> str:
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
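
# Typical invocation (added, illustrative; the script follows the legacy
# seq2seq examples layout, and the flag names come from the dataclasses above):
#
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./out --do_train --do_eval --task summarization --predict_with_generate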
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
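    # Freezing the embeddings and/or encoder shrinks the set of trainable parameters,
    # cutting optimizer memory and speeding up fine-tuning at some cost in capacity.
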
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )
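    # Each split is read from plain-text files under data_dir (e.g. train.source /
    # train.target); n_obs truncates a split, which is handy for smoke tests.
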
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
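    # compute_metrics is only wired up when --predict_with_generate is set, because
    # text metrics (ROUGE for summarization, BLEU for translation) need decoded
    # sequences rather than raw logits.
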
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
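    # metric_key_prefix namespaces the returned metrics (val_loss, val_bleu, ...),
    # keeping the train / val / test entries distinct once merged into all_metrics.
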
    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
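
# Example invocation (illustrative only; the model, data directory and flag values
# below are placeholders, not values required by this script):
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/mbart-large-cc25 \
#       --data_dir ./wmt_en_ro \
#       --task translation --src_lang en_XX --tgt_lang ro_RO \
#       --output_dir ./mbart_en_ro \
#       --do_train --do_eval --do_predict --predict_with_generate \
#       --overwrite_output_dir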